fix: docs
diff --git a/docs/ambari-design/kerberos/kerberos_descriptor.md b/docs/ambari-design/kerberos/kerberos_descriptor.md
index f054ab0..c1ff6d8 100644
--- a/docs/ambari-design/kerberos/kerberos_descriptor.md
+++ b/docs/ambari-design/kerberos/kerberos_descriptor.md
@@ -1,6 +1,7 @@
 ---
 title: The Kerberos Descriptor
 ---
+
 <!---
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements. See the NOTICE file distributed with
@@ -18,19 +19,18 @@
 limitations under the License.
 -->
 
-
 - [Introduction](index.md)
 - [The Kerberos Descriptor](#the-kerberos-descriptor)
   - [Components of a Kerberos Descriptor](#components-of-a-kerberos-descriptor)
     - [Stack-level Properties](#stack-level-properties)
     - [Stack-level Identities](#stack-level-identities)
     - [Stack-level Auth-to-local-properties](#stack-level-auth-to-local-properties)
-    - [Stack-level Configurations](#stack-level-configuratons)
+    - [Stack-level Configurations](#stack-level-configurations)
     - [Services](#services)
     - [Service-level Identities](#service-level-identities)
     - [Service-level Auth-to-local-properties](#service-level-auth-to-local-properties)
     - [Service-level Configurations](#service-level-configurations)
-    - [Components](#service-components)
+    - [Components](#components)
     - [Component-level Identities](#component-level-identities)
     - [Component-level Auth-to-local-properties](#component-level-auth-to-local-properties)
     - [Component-level Configurations](#component-level-configurations)
@@ -51,25 +51,15 @@
 
 ## The Kerberos Descriptor
 
-The Kerberos Descriptor is a JSON-formatted text file containing information needed by Ambari to enable
-or disable Kerberos for a stack and its services. This file must be named **_kerberos.json_** and should
-be in the root directory of the relevant stack or service definition. Kerberos Descriptors are meant to
-be hierarchical such that details in the stack-level descriptor can be overwritten (or updated) by details
-in the service-level descriptors.
+The Kerberos Descriptor is a JSON-formatted text file containing information needed by Ambari to enable or disable Kerberos for a stack and its services. This file must be named **_kerberos.json_** and should be in the root directory of the relevant stack or service definition. Kerberos Descriptors are meant to be hierarchical such that details in the stack-level descriptor can be overwritten (or updated) by details in the service-level descriptors.
 
-For the services in a stack to be Kerberized, there must be a stack-level Kerberos Descriptor. This
-ensures that even if a common service has a Kerberos Descriptor, it may not be Kerberized unless the
-relevant stack indicates that supports Kerberos by having a stack-level Kerberos Descriptor.
+For the services in a stack to be Kerberized, there must be a stack-level Kerberos Descriptor. This ensures that even if a common service has a Kerberos Descriptor, it may not be Kerberized unless the relevant stack indicates that it supports Kerberos by having a stack-level Kerberos Descriptor.
 
-For a component of a service to be Kerberized, there must be an entry for it in its containing service's
-service-level descriptor. This allows for some of a services' components to be managed and other
-components of that service to be ignored by the automated Kerberos facility.
+For a component of a service to be Kerberized, there must be an entry for it in its containing service's service-level descriptor. This allows for some of a service's components to be managed and other components of that service to be ignored by the automated Kerberos facility.
 
-Kerberos Descriptors are inherited from the base stack or service, but may be overridden as a full
-descriptor - partial descriptors are not allowed.
+Kerberos Descriptors are inherited from the base stack or service, but may be overridden as a full descriptor - partial descriptors are not allowed.
 
-A complete descriptor (which is built using the stack-level descriptor, the service-level descriptors,
-and any updates from user input) has the following structure:
+A complete descriptor (which is built using the stack-level descriptor, the service-level descriptors, and any updates from user input) has the following structure:
 
 - Stack-level Properties
 - Stack-level Identities
@@ -84,9 +74,7 @@
     - Component-level Auth-to-local-properties
     - Component-level Configurations
 
-Each level of the descriptor inherits the data from its parent. This data, however, may be overridden
-if necessary. For example, a component will inherit the configurations and identities of its container
-service; which in turn inherits the configurations and identities from the stack.
+Each level of the descriptor inherits the data from its parent. This data, however, may be overridden if necessary. For example, a component will inherit the configurations and identities of its container service; which in turn inherits the configurations and identities from the stack.
 
 <a name="components-of-a-kerberos-descriptor"></a>
 
@@ -96,13 +84,9 @@
 
 #### Stack-level Properties
 
-Stack-level properties is an optional set of name/value pairs that can be used in variable replacements.
-For example, if a property named "**_property1_**" exists with the value of "**_value1_**", then any instance of
-"**_${property1}_**" within a configuration property name or configuration property value will be replaced
-with "**_value1_**".
+Stack-level properties is an optional set of name/value pairs that can be used in variable replacements. For example, if a property named `**_property1_**` exists with the value of `**_value1_**`, then any instance of `**_${property1}_**` within a configuration property name or configuration property value will be replaced with `**_value1_**`.
 
-This property is only relevant in the stack-level Kerberos Descriptor and may not be overridden by
-lower-level descriptors.
+This property is only relevant in the stack-level Kerberos Descriptor and may not be overridden by lower-level descriptors.
 
 See [properties](#properties).
 
@@ -110,15 +94,7 @@
 
 #### Stack-level Identities
 
-Stack-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are common among all services in the stack. An example of such an identity is the
-Ambari smoke test user, which is used by all services to perform service check operations. Service-
-and component-level identities may reference (and specialize) stack-level identities using the
-identity’s name with a forward slash (/) preceding it. For example if there was a stack-level identity
-with the name "smokeuser", then a service or a component may create an identity block that references
-and specializes it by declaring a "**_reference_**" property and setting it to "/smokeuser".  Within
-this identity block details of the identity may be and overwritten as necessary. This does not alter
-the stack-level identity, it essentially creates a copy of it and updates the copy's properties.
+Stack-level identities is an optional identities block containing a list of zero or more identity descriptors that are common among all services in the stack. An example of such an identity is the Ambari smoke test user, which is used by all services to perform service check operations. Service- and component-level identities may reference (and specialize) stack-level identities using the identity’s name with a forward slash (/) preceding it. For example if there was a stack-level identity with the name "smokeuser", then a service or a component may create an identity block that references and specializes it by declaring a "**_reference_**" property and setting it to "/smokeuser". Within this identity block details of the identity may be overwritten as necessary. This does not alter the stack-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 See [identities](#identities).
 
@@ -126,9 +102,7 @@
 
 #### Stack-level Auth-to-local-properties
 
-Stack-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Stack-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auth-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -136,11 +110,7 @@
 
 #### Stack-level Configurations
 
-Stack-level configurations is an optional configurations block containing a list of zero or more
-configuration descriptors that are common among all services in the stack. Configuration descriptors
-are overridable due to the structure of the data.  However, overriding configuration properties may
-create undesired behavior since it is not known until after the Kerberization process is complete
-what value a property will have.
+Stack-level configurations is an optional configurations block containing a list of zero or more configuration descriptors that are common among all services in the stack. Configuration descriptors are overridable due to the structure of the data. However, overriding configuration properties may create undesired behavior since it is not known until after the Kerberization process is complete what value a property will have.
 
 See [configurations](#configurations).
 
@@ -148,8 +118,7 @@
 
 #### Services
 
-Services is a list of zero or more service descriptors. A stack-level Kerberos Descriptor should not
-list any services; however a service-level Kerberos Descriptor should contain at least one.
+Services is a list of zero or more service descriptors. A stack-level Kerberos Descriptor should not list any services; however a service-level Kerberos Descriptor should contain at least one.
 
 See [services](#services).
 
@@ -157,16 +126,9 @@
 
 #### Service-level Identities
 
-Service-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are common among all components of the service. Component-level identities may
-reference (and specialize) service-level identities by specifying a relative or an absolute path
-to it.
+Service-level identities is an optional identities block containing a list of zero or more identity descriptors that are common among all components of the service. Component-level identities may reference (and specialize) service-level identities by specifying a relative or an absolute path to it.
 
-For example if there was a service-level identity with the name "service_identity", then a child
-component may create an identity block that references and specializes it by setting its "reference"
-attribute to "../service_identity" or "/service_name/service_identity" and overriding any values as
-necessary. This does not override the service-level identity, it essentially creates a copy of it and
-updates the copy's properties.
+For example if there was a service-level identity with the name "service_identity", then a child component may create an identity block that references and specializes it by setting its "reference" attribute to "../service_identity" or "/service_name/service_identity" and overriding any values as necessary. This does not override the service-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 ##### Examples
 
@@ -186,8 +148,7 @@
 }
 ```
 
-**Note**: By using the absolute path to an identity, any service-level identity may be referenced by
-any other service or component.
+**Note**: By using the absolute path to an identity, any service-level identity may be referenced by any other service or component.
 
 See [identities](#identities).
 
@@ -195,9 +156,7 @@
 
 #### Service-level Auth-to-local-properties
 
-Service-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Service-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auth-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -205,11 +164,7 @@
 
 #### Service-level Configurations
 
-Service-level configurations is an optional configurations block listing of zero or more configuration
-descriptors that are common among all components within a service. Configuration descriptors may be
-overridden due to the structure of the data. However, overriding configuration properties may create
-undesired behavior since it is not known until after the Kerberization process is complete what value
-a property will have.
+Service-level configurations is an optional configurations block listing of zero or more configuration descriptors that are common among all components within a service. Configuration descriptors may be overridden due to the structure of the data. However, overriding configuration properties may create undesired behavior since it is not known until after the Kerberization process is complete what value a property will have.
 
 See [configurations](#configurations).
 
@@ -225,11 +180,7 @@
 
 #### Component-level Identities
 
-Component-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are specific to the component. A Component-level identity may be referenced
-(and specialized) by using the absolute path to it (`/service_name/component_name/identity_name`).
-This does not override the component-level identity, it essentially creates a copy of it and updates
-the copy's properties.
+Component-level identities is an optional identities block containing a list of zero or more identity descriptors that are specific to the component. A Component-level identity may be referenced (and specialized) by using the absolute path to it (`/service_name/component_name/identity_name`). This does not override the component-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 See [identities](#identities).
 
@@ -237,9 +188,7 @@
 
 #### Component-level Auth-to-local-properties
 
-Component-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Component-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auth-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -247,19 +196,17 @@
 
 #### Component-level Configurations
 
-Component-level configurations is an optional configurations block listing zero or more configuration
-descriptors that are specific to the component.
+Component-level configurations is an optional configurations block listing zero or more configuration descriptors that are specific to the component.
 
 See [configurations](#configurations).
 
-### Descriptor Specifications
+### Kerberos Descriptor Specifications
 
 <a name="properties"></a>
 
 #### properties
 
-The `properties` block is only valid in the service-level Kerberos Descriptor file. This block is
-a set of name/value pairs as follows:
+The `properties` block is only valid in the stack-level Kerberos Descriptor file. This block is a set of name/value pairs as follows:
 
 ```
 "properties" : {
@@ -273,14 +220,9 @@
 
 #### auth-to-local-properties
 
-The `auth-to-local-properties` block is valid in the stack-, service-, and component-level
-descriptors. This block is a list of configuration specifications
-(`config-type/property_name[|concatenation_scheme]`) indicating which properties contain
-auth-to-local rules that should be dynamically updated based on the identities used within the
-Kerberized cluster.
+The `auth-to-local-properties` block is valid in the stack-, service-, and component-level descriptors. This block is a list of configuration specifications (`config-type/property_name[|concatenation_scheme]`) indicating which properties contain auth-to-local rules that should be dynamically updated based on the identities used within the Kerberized cluster.
 
-The specification optionally declares the concatenation scheme to use to append
-the rules into a rule set value. If specified one of the following schemes may be set:
+The specification optionally declares the concatenation scheme to use to append the rules into a rule set value. If specified one of the following schemes may be set:
 
 - **`new_lines`** - rules in the rule set are separated by a new line (`\n`)
 - **`new_lines_escaped`** - rules in the rule set are separated by a `\` and a new line (`\n`)
@@ -300,15 +242,9 @@
 
 #### configurations
 
-A `configurations` block may exist in stack-, service-, and component-level descriptors.
-This block is a list of one or more configuration blocks containing a single structure named using
-the configuration type and containing values for each relevant property.
+A `configurations` block may exist in stack-, service-, and component-level descriptors. This block is a list of one or more configuration blocks containing a single structure named using the configuration type and containing values for each relevant property.
 
-Each property name and value may be a concrete value or contain variables to be replaced using values
-from the stack-level `properties` block or any available configuration. Properties from the `properties`
-block are referenced by name (`${property_name}`), configuration properties are reference by
-configuration specification (`${config-type/property_name}`) and kerberos principals are referenced by the principal path
-(`principals/SERVICE/COMPONENT/principal_name`).
+Each property name and value may be a concrete value or contain variables to be replaced using values from the stack-level `properties` block or any available configuration. Properties from the `properties` block are referenced by name (`${property_name}`), configuration properties are referenced by configuration specification (`${config-type/property_name}`) and Kerberos principals are referenced by the principal path (`principals/SERVICE/COMPONENT/principal_name`).
 
 ```
 "configurations" : [
@@ -329,8 +265,7 @@
 ]
 ```
 
-If `cluster-env/smokuser` was `"ambari-qa"` and realm was `"EXAMPLE.COM"`, the above block would
-effectively be translated to
+If `cluster-env/smokeuser` was `"ambari-qa"` and realm was `"EXAMPLE.COM"`, the above block would effectively be translated to
 
 ```
 "configurations" : [
@@ -355,25 +290,11 @@
 
 #### identities
 
-An `identities` descriptor may exist in stack-, service-, and component-level descriptors. This block
-is a list of zero or more identity descriptors. Each identity descriptor is a block containing a `name`,
-an optional `reference` identifier, an optional `principal` descriptor, and an optional `keytab`
-descriptor.
+An `identities` descriptor may exist in stack-, service-, and component-level descriptors. This block is a list of zero or more identity descriptors. Each identity descriptor is a block containing a `name`, an optional `reference` identifier, an optional `principal` descriptor, and an optional `keytab` descriptor.
 
-The `name` property of an `identity` descriptor should be a concrete name that is unique with in its
-`local` scope (stack, service, or component). However, to maintain backwards-compatibility with
-previous versions of Ambari, it may be a reference identifier to some other identity in the
-Kerberos Descriptor. This feature is deprecated and may not be available in future versions of Ambari.
+The `name` property of an `identity` descriptor should be a concrete name that is unique within its `local` scope (stack, service, or component). However, to maintain backwards-compatibility with previous versions of Ambari, it may be a reference identifier to some other identity in the Kerberos Descriptor. This feature is deprecated and may not be available in future versions of Ambari.
 
-The `reference` property of an `identitiy` descriptor is optional. If it exists, it indicates that the
-properties from referenced identity is to be used as the base for the current identity and any properties
-specified in the local identity block overrides the base data. In this scenario, the base data is copied
-to the local identities and therefore changes are realized locally, not globally. Referenced identities
-may be hierarchical, so a referenced identity may reference another identity, and so on.  Because of
-this, care must be taken not to create cyclic references. Reference values must be in the form of a
-relative or absolute _path_ to the referenced identity descriptor. Relative _paths_ start with a `../`
-and may be specified in component-level identity descriptors to reference an identity descriptor
-in the parent service. Absolute _paths_ start with a `/` and may be specified at any level as follows:
+The `reference` property of an `identity` descriptor is optional. If it exists, it indicates that the properties from the referenced identity are to be used as the base for the current identity and any properties specified in the local identity block override the base data. In this scenario, the base data is copied to the local identities and therefore changes are realized locally, not globally. Referenced identities may be hierarchical, so a referenced identity may reference another identity, and so on. Because of this, care must be taken not to create cyclic references. Reference values must be in the form of a relative or absolute _path_ to the referenced identity descriptor. Relative _paths_ start with a `../` and may be specified in component-level identity descriptors to reference an identity descriptor in the parent service. Absolute _paths_ start with a `/` and may be specified at any level as follows:
 
 - **Stack-level** identity reference: `/identitiy_name`
 - **Service-level** identity reference: `/SERVICE_NAME/identitiy_name`
@@ -407,31 +328,15 @@
 
 #### principal
 
-The `principal` block is an optional block inside an `identity` descriptor block. It declares the
-details about the identity’s principal, including the principal’s `value`, the `type` (user or service),
-the relevant `configuration` property, and a local username mapping. All properties are optional; however
-if no base or default value is available (via the parent identity's `reference` value) for all properties,
-the principal may be ignored.
+The `principal` block is an optional block inside an `identity` descriptor block. It declares the details about the identity’s principal, including the principal’s `value`, the `type` (user or service), the relevant `configuration` property, and a local username mapping. All properties are optional; however if no base or default value is available (via the parent identity's `reference` value) for all properties, the principal may be ignored.
 
-The `value` property of the principal is expected to be the normalized principal name, including the
-principal’s components and realm. In most cases, the realm should be specified using the realm variable
-(`${realm}` or `${kerberos-env/realm}`). Also, in the case of a service principal, "`_HOST`" should be
-used to represent the relevant hostname.  This value is typically replaced on the agent side by either
-the agent-side scripts or the services themselves to be the hostname of the current host. However the
-built-in hostname variable (`${hostname}`) may be used if "`_HOST`" replacement on the agent-side is
-not available for the service. Examples: `smokeuser@${realm}`, `service/_HOST@${realm}`.
+The `value` property of the principal is expected to be the normalized principal name, including the principal’s components and realm. In most cases, the realm should be specified using the realm variable (`${realm}` or `${kerberos-env/realm}`). Also, in the case of a service principal, "`_HOST`" should be used to represent the relevant hostname. This value is typically replaced on the agent side by either the agent-side scripts or the services themselves to be the hostname of the current host. However the built-in hostname variable (`${hostname}`) may be used if "`_HOST`" replacement on the agent-side is not available for the service. Examples: `smokeuser@${realm}`, `service/_HOST@${realm}`.
 
-The `type` property of the principal may be either `user` or `service`. If not specified, the type is
-assumed to be `user`. This value dictates how the identity is to be created in the KDC or Active Directory.
-It is especially important in the Active Directory case due to how accounts are created. It also,
-indicates to Ambari how to handle the principal and relevant keytab file reguarding the user interface
-behavior and data caching.
+The `type` property of the principal may be either `user` or `service`. If not specified, the type is assumed to be `user`. This value dictates how the identity is to be created in the KDC or Active Directory. It is especially important in the Active Directory case due to how accounts are created. It also indicates to Ambari how to handle the principal and relevant keytab file regarding the user interface behavior and data caching.
 
-The `configuration` property is an optional configuration specification (`config-type/property_name`)
-that is to be set to this principal's `value` (after its variables have been replaced).
+The `configuration` property is an optional configuration specification (`config-type/property_name`) that is to be set to this principal's `value` (after its variables have been replaced).
 
-The `local_username` property, if supplied, indicates which local user account to use when generating
-auth-to-local rules for this identity. If not specified, no explicit auth-to-local rule will be generated.
+The `local_username` property, if supplied, indicates which local user account to use when generating auth-to-local rules for this identity. If not specified, no explicit auth-to-local rule will be generated.
 
 ```
 "principal" : {
@@ -454,24 +359,15 @@
 
 #### keytab
 
-The `keytab` block is an optional block inside an `identity` descriptor block. It describes how to
-create and store the relevant keytab file.  This block declares the keytab file's path in the local
-filesystem of the destination host, the permissions to assign to that file, and the relevant
-configuration property.
+The `keytab` block is an optional block inside an `identity` descriptor block. It describes how to create and store the relevant keytab file. This block declares the keytab file's path in the local filesystem of the destination host, the permissions to assign to that file, and the relevant configuration property.
 
-The `file` property declares an absolute path to use to store the keytab file when distributing to
-relevant hosts. If this is not supplied, the keytab file will not be created.
+The `file` property declares an absolute path to use to store the keytab file when distributing to relevant hosts. If this is not supplied, the keytab file will not be created.
 
-The `owner` property is an optional block indicating the local user account to assign as the owner of
-the file and what access  (`"rw"` - read/write; `"r"` - read-only) should
-be granted to that user. By default, the owner will be given read-only access.
+The `owner` property is an optional block indicating the local user account to assign as the owner of the file and what access (`"rw"` - read/write; `"r"` - read-only) should be granted to that user. By default, the owner will be given read-only access.
 
-The `group` property is an optional block indicating which local group to assigned as the group owner
-of the file and what access (`"rw"` - read/write; `"r"` - read-only; `“”` - no access) should be granted
-to local user accounts in that group. By default, the group will be given no access.
+The `group` property is an optional block indicating which local group to assign as the group owner of the file and what access (`"rw"` - read/write; `"r"` - read-only; `“”` - no access) should be granted to local user accounts in that group. By default, the group will be given no access.
 
-The `configuration` property is an optional configuration specification (`config-type/property_name`)
-that is to be set to the path of this keytabs file (after any variables have been replaced).
+The `configuration` property is an optional configuration specification (`config-type/property_name`) that is to be set to the path of this keytab file (after any variables have been replaced).
 
 ```
 "keytab" : {
@@ -492,11 +388,9 @@
 
 #### services
 
-A `services` block may exist in the stack-level and the service-level Kerberos Descriptor file.
-This block is a list of zero or more service descriptors to add to the Kerberos Descriptor.
+A `services` block may exist in the stack-level and the service-level Kerberos Descriptor file. This block is a list of zero or more service descriptors to add to the Kerberos Descriptor.
 
-Each service block contains a service `name`, and optionals `identities`,  `auth_to_local_properties`
-`configurations`, and `components` blocks.
+Each service block contains a service `name`, and optional `identities`, `auth_to_local_properties`, `configurations`, and `components` blocks.
 
 ```
 "services": [
@@ -538,10 +432,7 @@
 
 #### components
 
-A `components` block may exist within a `service` descriptor block. This block is a list of zero or
-more component descriptors belonging to the containing service descriptor. Each component descriptor
-is a block containing a component `name`, and optional `identities`, `auth_to_local_properties`,
-and `configurations` blocks.
+A `components` block may exist within a `service` descriptor block. This block is a list of zero or more component descriptors belonging to the containing service descriptor. Each component descriptor is a block containing a component `name`, and optional `identities`, `auth_to_local_properties`, and `configurations` blocks.
 
 ```
 "components": [
@@ -566,8 +457,8 @@
 ### Examples
 
 #### Example Stack-level Kerberos Descriptor
-The following example is annotated for descriptive purposes. The annotations are not valid in a real
-JSON-formatted file.
+
+The following example is annotated for descriptive purposes. The annotations are not valid in a real JSON-formatted file.
 
 ```json5
 {
@@ -576,23 +467,23 @@
   // Since variable replacement is recursive, ${realm} will resolve
   // to ${kerberos-env/realm}, which in-turn will resolve to the
   // declared default realm for the cluster
-  "properties": {
-    "realm": "${kerberos-env/realm}",
-    "keytab_dir": "/etc/security/keytabs"
+  properties: {
+    realm: '${kerberos-env/realm}',
+    keytab_dir: '/etc/security/keytabs',
   },
   // A list of global Kerberos identities. These may be referenced
   // using /identity_name. For example the “spnego” identity may be
   // referenced using “/spnego”
-  "identities": [
+  identities: [
     {
-      "name": "spnego",
+      name: 'spnego',
       // Details about this identity's principal. This instance does not
       // declare any value for configuration or local username. That is
       // left up to the services and components use wish to reference
       // this principal and set overrides for those values.
-      "principal": {
-        "value": "HTTP/_HOST@${realm}",
-        "type" : "service"
+      principal: {
+        value: 'HTTP/_HOST@${realm}',
+        type: 'service',
       },
       // Details about this identity’s keytab file. This keytab file
       // will be created in the configured keytab file directory with
@@ -607,31 +498,31 @@
       // be set in any configuration file by default. Services and
       // components need to reference this identity to update this
       // value as needed.
-      "keytab": {
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "owner": {
-          "name": "root",
-          "access": "r"
+      keytab: {
+        file: '${keytab_dir}/spnego.service.keytab',
+        owner: {
+          name: 'root',
+          access: 'r',
         },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        }
-      }
+        group: {
+          name: '${cluster-env/user_group}',
+          access: 'r',
+        },
+      },
     },
     {
-      "name": "smokeuser",
+      name: 'smokeuser',
       // Details about this identity's principal. This instance declares
       // a configuration and local username mapping. Services and
       // components can override this to set additional configurations
       // that should be set to this principal value.  Overriding the
       // local username may create undesired behavior since there may be
       // conflicting entries in relevant auth-to-local rule sets.
-      "principal": {
-        "value": "${cluster-env/smokeuser}@${realm}",
-        "type" : "user",
-        "configuration": "cluster-env/smokeuser_principal_name",
-        "local_username" : "${cluster-env/smokeuser}"
+      principal: {
+        value: '${cluster-env/smokeuser}@${realm}',
+        type: 'user',
+        configuration: 'cluster-env/smokeuser_principal_name',
+        local_username: '${cluster-env/smokeuser}',
       },
       // Details about this identity’s keytab file. This keytab file
       // will be created in the configured keytab file directory with
@@ -643,74 +534,74 @@
       // is desired that multiple keytab files are created, these
       // values may be overridden in a reference within a service or
       // component.
-      "keytab": {
-        "file": "${keytab_dir}/smokeuser.headless.keytab",
-        "owner": {
-          "name": "${cluster-env/smokeuser}",
-          "access": "r"
+      keytab: {
+        file: '${keytab_dir}/smokeuser.headless.keytab',
+        owner: {
+          name: '${cluster-env/smokeuser}',
+          access: 'r',
         },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
+        group: {
+          name: '${cluster-env/user_group}',
+          access: 'r',
         },
-        "configuration": "cluster-env/smokeuser_keytab"
-      }
-    }
-  ]
+        configuration: 'cluster-env/smokeuser_keytab',
+      },
+    },
+  ],
 }
 ```
 
 #### Example Service-level Kerberos Descriptor
-The following example is annotated for descriptive purposes. The annotations are not valid in a real
-JSON-formatted file.
+
+The following example is annotated for descriptive purposes. The annotations are not valid in a real JSON-formatted file.
 
 ```json5
 {
   // One or more services may be listed in a service-level Kerberos
   // Descriptor file
-  "services": [
+  services: [
     {
-      "name": "SERVICE_1",
+      name: 'SERVICE_1',
       // Service-level identities to be created if this service is installed.
       // Any relevant keytab files will be distributed to hosts with at least
       // one of the components on it.
-      "identities": [
+      identities: [
         // Service-specific identity declaration, declaring all properties
         // needed initiate the creation of the principal and keytab files,
         // as well as setting the service-specific  configurations.  This may
         // be referenced by contained components using ../service1_identity.
         {
-          "name": "service1_identity",
-          "principal": {
-            "value": "service1/_HOST@${realm}",
-            "type" : "service",
-            "configuration": "service1-site/service1.principal"
+          name: 'service1_identity',
+          principal: {
+            value: 'service1/_HOST@${realm}',
+            type: 'service',
+            configuration: 'service1-site/service1.principal',
           },
-          "keytab": {
-            "file": "${keytab_dir}/service1.service.keytab",
-            "owner": {
-              "name": "${service1-env/service_user}",
-              "access": "r"
+          keytab: {
+            file: '${keytab_dir}/service1.service.keytab',
+            owner: {
+              name: '${service1-env/service_user}',
+              access: 'r',
             },
-            "group": {
-              "name": "${cluster-env/user_group}",
-              "access": "r"
+            group: {
+              name: '${cluster-env/user_group}',
+              access: 'r',
             },
-            "configuration": "service1-site/service1.keytab.file"
-          }
+            configuration: 'service1-site/service1.keytab.file',
+          },
         },
         // Service-level identity referencing the stack-level spnego
         // identity and overriding the principal and keytab configuration
         // specifications.
         {
-          "name": "service1_spnego",
-          "reference": "/spnego",
-          "principal": {
-            "configuration": "service1-site/service1.web.principal"
+          name: 'service1_spnego',
+          reference: '/spnego',
+          principal: {
+            configuration: 'service1-site/service1.web.principal',
           },
-          "keytab": {
-            "configuration": "service1-site/service1.web.keytab.file"
-          }
+          keytab: {
+            configuration: 'service1-site/service1.web.keytab.file',
+          },
         },
         // Service-level identity referencing the stack-level smokeuser
         // identity. No properties are being overridden and overriding
@@ -719,137 +610,135 @@
         // keytab file is distributed to all hosts where components of this
         // this service are installed.
         {
-          "name": "service1_smokeuser",
-          "reference": "/smokeuser"
-        }
+          name: 'service1_smokeuser',
+          reference: '/smokeuser',
+        },
       ],
       // Properties related to this service that require the auth-to-local
       // rules to be dynamically generated based on identities create for
       // the cluster.
-      "auth_to_local_properties" : [
-        "service1-site/security.auth_to_local"
-      ],
+      auth_to_local_properties: ['service1-site/security.auth_to_local'],
       // Configuration properties to be set when this service is installed,
       // no matter which components are installed
-      "configurations": [
+      configurations: [
         {
-          "service-site": {
-            "service1.security.authentication": "kerberos",
-            "service1.security.auth_to_local": ""
-          }
-        }
+          'service-site': {
+            'service1.security.authentication': 'kerberos',
+            'service1.security.auth_to_local': '',
+          },
+        },
       ],
       // A list of components related to this service
-      "components": [
+      components: [
         {
-          "name": "COMPONENT_1",
+          name: 'COMPONENT_1',
           // Component-specific identities to be created when this component
           // is installed.  Any keytab files specified will be distributed
           // only to the hosts where this component is installed.
-          "identities": [
+          identities: [
             // An identity "local" to this component
             {
-              "name": "component1_service_identity",
-              "principal": {
-                "value": "component1/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "service1-site/comp1.principal",
-                "local_username" : "${service1-env/service_user}"
+              name: 'component1_service_identity',
+              principal: {
+                value: 'component1/_HOST@${realm}',
+                type: 'service',
+                configuration: 'service1-site/comp1.principal',
+                local_username: '${service1-env/service_user}',
               },
-              "keytab": {
-                "file": "${keytab_dir}/s1c1.service.keytab",
-                "owner": {
-                  "name": "${service1-env/service_user}",
-                  "access": "r"
+              keytab: {
+                file: '${keytab_dir}/s1c1.service.keytab',
+                owner: {
+                  name: '${service1-env/service_user}',
+                  access: 'r',
                 },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
+                group: {
+                  name: '${cluster-env/user_group}',
+                  access: '',
                 },
-                "configuration": "service1-site/comp1.keytab.file"
-              }
+                configuration: 'service1-site/comp1.keytab.file',
+              },
             },
             // The stack-level spnego identity overridden to set component-specific
             // configurations
             {
-              "name": "component1_spnego_1",
-              "reference": "/spnego",
-              "principal": {
-                "configuration": "service1-site/comp1.spnego.principal"
+              name: 'component1_spnego_1',
+              reference: '/spnego',
+              principal: {
+                configuration: 'service1-site/comp1.spnego.principal',
               },
-              "keytab": {
-                "configuration": "service1-site/comp1.spnego.keytab.file"
-              }
+              keytab: {
+                configuration: 'service1-site/comp1.spnego.keytab.file',
+              },
             },
             // The stack-level spnego identity overridden to set a different set of component-specific
             // configurations
             {
-              "name": "component1_spnego_2",
-              "reference": "/spnego",
-              "principal": {
-                "configuration": "service1-site/comp1.someother.principal"
+              name: 'component1_spnego_2',
+              reference: '/spnego',
+              principal: {
+                configuration: 'service1-site/comp1.someother.principal',
               },
-              "keytab": {
-                "configuration": "service1-site/comp1.someother.keytab.file"
-              }
-            }
+              keytab: {
+                configuration: 'service1-site/comp1.someother.keytab.file',
+              },
+            },
           ],
           // Component-specific configurations to set if this component is installed
-          "configurations": [
+          configurations: [
             {
-              "service-site": {
-                "comp1.security.type": "kerberos"
-              }
-            }
-          ]
+              'service-site': {
+                'comp1.security.type': 'kerberos',
+              },
+            },
+          ],
         },
         {
-          "name": "COMPONENT_2",
-          "identities": [
+          name: 'COMPONENT_2',
+          identities: [
             {
-              "name": "component2_service_identity",
-              "principal": {
-                "value": "component2/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "service1-site/comp2.principal",
-                "local_username" : "${service1-env/service_user}"
+              name: 'component2_service_identity',
+              principal: {
+                value: 'component2/_HOST@${realm}',
+                type: 'service',
+                configuration: 'service1-site/comp2.principal',
+                local_username: '${service1-env/service_user}',
               },
-              "keytab": {
-                "file": "${keytab_dir}/s1c2.service.keytab",
-                "owner": {
-                  "name": "${service1-env/service_user}",
-                  "access": "r"
+              keytab: {
+                file: '${keytab_dir}/s1c2.service.keytab',
+                owner: {
+                  name: '${service1-env/service_user}',
+                  access: 'r',
                 },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
+                group: {
+                  name: '${cluster-env/user_group}',
+                  access: '',
                 },
-                "configuration": "service1-site/comp2.keytab.file"
-              }
+                configuration: 'service1-site/comp2.keytab.file',
+              },
             },
             // The service-level service1_identity identity overridden to
             // set component-specific configurations
             {
-              "name": "component2_service1_identity",
-              "reference": "../service1_identity",
-              "principal": {
-                "configuration": "service1-site/comp2.service.principal"
+              name: 'component2_service1_identity',
+              reference: '../service1_identity',
+              principal: {
+                configuration: 'service1-site/comp2.service.principal',
               },
-              "keytab": {
-                "configuration": "service1-site/comp2.service.keytab.file"
-              }
-            }
+              keytab: {
+                configuration: 'service1-site/comp2.service.keytab.file',
+              },
+            },
           ],
-          "configurations" : [
+          configurations: [
             {
-              "service-site" : {
-                "comp2.security.type": "kerberos"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
+              'service-site': {
+                'comp2.security.type': 'kerberos',
+              },
+            },
+          ],
+        },
+      ],
+    },
+  ],
 }
 ```
diff --git a/docs/ambari-design/stack-and-services/faq.md b/docs/ambari-design/stack-and-services/faq.md
index d19f838..06e68f2 100644
--- a/docs/ambari-design/stack-and-services/faq.md
+++ b/docs/ambari-design/stack-and-services/faq.md
@@ -6,18 +6,17 @@
 
 Ambari goes property by property and merge them from parent to child. So if you remove a category for example from the child it will be inherited from parent, that goes for pretty much all properties.
 
-So, the question is how do we tackle existence of a property in both parent and child. Here, most of the decision still follow same paradigm as take the child value instead of parent and every property in parent, not explicitly deleted from child using a marker like 
+So, the question is how do we tackle existence of a property in both parent and child. Here, most of the decisions still follow the same paradigm: take the child value instead of the parent's, and every property in the parent, not explicitly deleted from the child using a marker like
 
+- For config-dependencies, we take all or nothing approach, if this property exists in child use it and all of its children else take it from parent.
 
-* For config-dependencies, we take all or nothing approach, if this property exists in child use it and all of its children else take it from parent.
+- The custom commands are merged based on names, such that the merged definition is a union of commands, with child commands with the same name overriding those from the parent.
 
-* The custom commands are merged based on names, such that merged definition is a union of commands with child commands with same name overriding those fro parent.
-
-* Cardinality is overwritten by a child or take from the parent if child has not provided one.
+- Cardinality is overwritten by a child or taken from the parent if the child has not provided one.
 
 You could look at this method for more details: `org.apache.ambari.server.api.util.StackExtensionHelper#mergeServices`
 
-For more information see the [Service Inheritance](./custom-services.md#Service20%Inheritance) wiki page.
+For more information see the [Service Inheritance](./custom-services.md#service-inheritance) wiki page.
 
 **If a component is missing in the new definition but is present in the parent, does it get inherited?**
 
@@ -26,4 +25,3 @@
 **Configuration dependencies for the service -- are they overwritten or merged?**
 
 Overwritten.
-
diff --git a/docs/ambari-design/stack-and-services/stack-inheritance.md b/docs/ambari-design/stack-and-services/stack-inheritance.md
index 8d5184d..5fd3ffd 100644
--- a/docs/ambari-design/stack-and-services/stack-inheritance.md
+++ b/docs/ambari-design/stack-and-services/stack-inheritance.md
@@ -1,4 +1,3 @@
-
 # Stack Inheritance
 
 Each stack version must provide a metainfo.xml descriptor file which can declare whether the stack inherits from another stack version:
@@ -17,21 +16,21 @@
 
 The following files should not be redefined at the child stack version level:
 
-* properties/stack_features.json
-* properties/stack_tools.json
+- properties/stack_features.json
+- properties/stack_tools.json
 
 Note: These files should only exist at the base stack level.
 
 The following files if defined in the current stack version replace the definitions from the parent stack version:
 
-* kerberos.json
-* widgets.json
+- kerberos.json
+- widgets.json
 
 The following files if defined in the current stack version are merged with the parent stack version:
 
-* configuration/cluster-env.xml
+- configuration/cluster-env.xml
 
-* role_command_order.json
+- role_command_order.json
 
 Note: All the services' role command orders will be merge with the stack's role command order to provide a master list.
 
@@ -39,14 +38,14 @@
 
 The following directories if defined in the current stack version replace those from the parent stack version:
 
-* hooks
+- hooks
 
 This means the files included in those directories at the parent level will not be inherited. You will need to copy all the files you wish to keep from that directory structure.
 
 The following directories are not inherited:
 
-* repos
-* upgrades
+- repos
+- upgrades
 
 The repos/repoinfo.xml file should be defined in every stack version. The upgrades directory and its corresponding XML files should be defined in all stack versions that support upgrade.
 
@@ -59,10 +58,10 @@
       def __init__(self):
         super(HDP23StackAdvisor, self).__init__()
         Logger.initialize_logger()
- 
+
       def getComponentLayoutValidations(self, services, hosts):
         parentItems = super(HDP23StackAdvisor, self).getComponentLayoutValidations(services, hosts)
                  ...
 ```
 
-Services defined within the services folder follow the rules for [service inheritance](./custom-services.md#Service20%Inheritance). By default if a service does not declare an explicit inheritance (via the **extends** tag), the service will inherit from the service defined at the parent stack version.
+Services defined within the services folder follow the rules for [service inheritance](./custom-services.md#service-inheritance). By default if a service does not declare an explicit inheritance (via the **extends** tag), the service will inherit from the service defined at the parent stack version.
diff --git a/docs/ambari-design/views/index.md b/docs/ambari-design/views/index.md
index 81db71c..790c9d7 100644
--- a/docs/ambari-design/views/index.md
+++ b/docs/ambari-design/views/index.md
@@ -1,39 +1,36 @@
 # Views
 
-:::info
-This capability is currently under development.
-:::info
+:::info This capability is currently under development. :::
 
 **Ambari Views** offer a systematic way to plug-in UI capabilities to surface custom visualization, management and monitoring features in Ambari Web. A " **view**" is a way of extending Ambari that allows 3rd parties to plug in new resource types along with the APIs, providers and UI to support them. In other words, a view is an application that is deployed into the Ambari container.
 
-
 ## Useful Resources
 
-Resource | Link
----------|-------
-Views Overview  | http://www.slideshare.net/hortonworks/ambari-views-overview
-Views Framework API Docs | https://github.com/apache/ambari/blob/trunk/ambari-views/docs/index.md
-Views Framework Examples | https://github.com/apache/ambari/tree/trunk/ambari-views/examples
+| Resource                 | Link                                                                   |
+| ------------------------ | ---------------------------------------------------------------------- |
+| Views Overview           | http://www.slideshare.net/hortonworks/ambari-views-overview            |
+| Views Framework API Docs | https://github.com/apache/ambari/blob/trunk/ambari-views/docs/index.md |
+| Views Framework Examples | https://github.com/apache/ambari/tree/trunk/ambari-views/examples      |
 
 ## Terminology
 
 The following section describes the basic terminology associated with views.
 
-Term | Description
----------|-------
-View Name     | The name of the view. The view name identifies the view to Ambari.
-View Version  | The version of the view. A unique view name can have multiple versions deployed in Ambari.
-View Package  | This is the JAR package that contains the **view definition** and all view resources (server-side resources and client-side assets) for a given view version. See [View Package](#View20%Package) for more information on the contents and structure of the package.
-View Definition | This defines the view name, version, resources and required/optional configuration parameters for a view. The view definition file is included in the view package. See View Definition for more information on the view definition file syntax and features.
-View Instance | An unique instance of a view, that is based on a view definition and specific version that is configured. See Versions and Instances for more information.
-View API  | The REST API for viewing the list of deployed views and creating view instances. See View API for more information.
-Framework Services | The server-side of the view framework exposes certain services for use with your views. This includes persistence of view instance data and view eventing. See Framework Services for more information.
+| Term | Description |
+| --- | --- |
+| View Name | The name of the view. The view name identifies the view to Ambari. |
+| View Version | The version of the view. A unique view name can have multiple versions deployed in Ambari. |
+| View Package | This is the JAR package that contains the **view definition** and all view resources (server-side resources and client-side assets) for a given view version. See [View Package](#view-package) for more information on the contents and structure of the package. |
+| View Definition | This defines the view name, version, resources and required/optional configuration parameters for a view. The view definition file is included in the view package. See View Definition for more information on the view definition file syntax and features. |
+| View Instance | An unique instance of a view, that is based on a view definition and specific version that is configured. See Versions and Instances for more information. |
+| View API | The REST API for viewing the list of deployed views and creating view instances. See View API for more information. |
+| Framework Services | The server-side of the view framework exposes certain services for use with your views. This includes persistence of view instance data and view eventing. See Framework Services for more information. |
 
 ## Components of a View
 
 A view can consist of **client-side assets** (i.e. the UI that is exposed in Ambari Web) and **server-side resources** (i.e. the classes that expose REST end points). When the view loads into Ambari Web, the view UI can use the view server-side resources as necessary to deliver the view functionality.
 
-![Apache Ambari > Views > view-components.jpg](./imgs/view-components.jpg "Apache Ambari > Views > view-components.jpg")
+![Apache Ambari > Views > view-components.jpg](./imgs/view-components.jpg 'Apache Ambari > Views > view-components.jpg')
 
 ### Client-side Assets
 
@@ -43,11 +40,9 @@
 
 A view can expose resources as REST end points to be used in conjunction with the client-side to deliver the functionality of your view application. Thees resources are written in Java and can be anything from a servlet to a regular REST service to an Ambari ResourceProvider (i.e. a special type of REST service that handles some REST capabilities such as partial response and pagination – if you adhere to the Ambari ResourceProvider interface). See [Framework Services](./framework-services.md) for more information on capabilities that the framework exposes on the server-side for views.
 
-:::info
-Checkout the **Weather View** as an example of a view that exposes servlet and REST endpoints.
+:::info Check out the **Weather View** as an example of a view that exposes servlet and REST endpoints.
 
-[https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view](https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view)
-:::
+[https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view](https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view) :::
 
 ## View Package
 
@@ -72,11 +67,12 @@
 ## Versions and Instances
 
 Multiple versions of a given view can be deployed into Ambari and multiple instances of each view can be created for each version. For example, I can have a view named FILES and deploy versions 0.1.0 and 0.2.0. I can then create instances of each version `FILES_0.1.0` and `FILES_0.2.0` allowing some Ambari users to have an older version of FILES (0.1.0), and other users to have a newer version (0.2.0).
+
 ## Versions and Instances
 
 Multiple versions of a given view can be deployed into Ambari and multiple instances of each view can be created for each version. For example, I can have a view named FILES and deploy versions 0.1.0 and 0.2.0. I can then create instances of each version `FILES_0.1.0` and `FILES_0.2.0` allowing some Ambari users to have an older version of FILES (0.1.0), and other users to have a newer version (0.2.0). I can also create multiple instances for each version, configuring each differently.
 
-![Apache Ambari > Views > view-versions.jpg](./imgs/view-versions.jpg "Apache Ambari > Views > view-versions.jpg")
+![Apache Ambari > Views > view-versions.jpg](./imgs/view-versions.jpg 'Apache Ambari > Views > view-versions.jpg')
 
 ### Instance Configuration Parameters
 
@@ -90,4 +86,4 @@
 
 The lifecycle of a view is shown below. As you deploy a view and create instances of a view, server-side framework events are invoked. See [Framework Services](./framework-services.md) for more information on capabilities that the framework exposes on the server-side for views.
 
-![Apache Ambari > Views > view-lifecycle.png](./imgs/view-lifecycle.png "Apache Ambari > Views > view-lifecycle.png")
+![Apache Ambari > Views > view-lifecycle.png](./imgs/view-lifecycle.png 'Apache Ambari > Views > view-lifecycle.png')
diff --git a/docs/ambari-dev/index.md b/docs/ambari-dev/index.md
index e5e6f84..b4e6cd4 100644
--- a/docs/ambari-dev/index.md
+++ b/docs/ambari-dev/index.md
@@ -18,11 +18,9 @@
 
 Alternatively, you can easily launch a VM that is preconfigured with all the tools that you need. See the **Pre-Configured Development Environment** section in the [Quick Start Guide](../quick-start/quick-start-guide.md).
 
-* xCode (if using Mac - free download from the apple store)
-* JDK 8 (Ambari 2.6 and below can be compiled with JDK 7, from Ambari 2.7, it can be compiled with at least JDK 8)
-* [Apache Maven](http://maven.apache.org/download.html) 3.3.9 or later
-Tip:In order to persist your changes to the JAVA_HOME environment variable and add Maven to your path, create the following files:
-File: ~/.profile
+- xCode (if using Mac - free download from the apple store)
+- JDK 8 (Ambari 2.6 and below can be compiled with JDK 7, from Ambari 2.7, it can be compiled with at least JDK 8)
+- [Apache Maven](http://maven.apache.org/download.html) 3.3.9 or later. Tip: In order to persist your changes to the JAVA_HOME environment variable and add Maven to your path, create the following files: File: ~/.profile
 
 ```bash
 source ~/.bashrc
@@ -36,10 +34,8 @@
 export _JAVA_OPTIONS="-Xmx2048m -XX:MaxPermSize=512m -Djava.awt.headless=true"
 ```
 
-
-* Python 2.6 (Ambari 2.7 or later require Python 2.7 as minimum supported version)
-* Python setuptools:
-for Python 2.6: D [ownload](http://pypi.python.org/packages/2.6/s/setuptools/setuptools-0.6c11-py2.6.egg#md5=bfa92100bd772d5a213eedd356d64086) setuptools and run:
+- Python 2.6 (Ambari 2.7 or later require Python 2.7 as minimum supported version)
+- Python setuptools: for Python 2.6: [Download](http://pypi.python.org/packages/2.6/s/setuptools/setuptools-0.6c11-py2.6.egg#md5=bfa92100bd772d5a213eedd356d64086) setuptools and run:
 
 ```bash
 sh setuptools-0.6c11-py2.6.egg
@@ -51,61 +47,53 @@
 sh setuptools-0.6c11-py2.7.egg
 ```
 
-
-* rpmbuild (rpm-build package)
-* g++ (gcc-c++ package)
+- rpmbuild (rpm-build package)
+- g++ (gcc-c++ package)
 
 ## Running Unit Tests
 
-* `mvn clean test`
-* Run unit tests in a single module:
+- `mvn clean test`
+- Run unit tests in a single module:
 
 ```bash
 mvn -pl ambari-server test
 ```
 
-
-* Run only Java tests:
+- Run only Java tests:
 
 ```bash
 mvn -pl ambari-server -DskipPythonTests
 ```
 
-
-* Run only specific Java tests:
+- Run only specific Java tests:
 
 ```bash
 mvn -pl ambari-server -DskipPythonTests -Dtest=AgentHostInfoTest test
 ```
 
-
-* Run only Python tests:
+- Run only Python tests:
 
 ```bash
 mvn -pl ambari-server -DskipSurefireTests test
 ```
 
-
-* Run only specific Python tests:
+- Run only specific Python tests:
 
 ```bash
 mvn -pl ambari-server -DskipSurefireTests -Dpython.test.mask=TestUtils.py test
 ```
 
-
-* Run only Checkstyle and RAT checks:
+- Run only Checkstyle and RAT checks:
 
 ```bash
 mvn -pl ambari-server -DskipTests test
 ```
 
-
-
 NOTE: Please make sure you have npm in the path before running the unit tests.
 
 ## Generating Findbugs Report
 
-* mvn clean install
+- mvn clean install
 
 This will generate xml and html report unders target/findbugs. You can also add flags to skip unit tests to generate report faster.
 
@@ -115,7 +103,7 @@
 
 To build Ambari RPMs, run the following.
 
-Note: Replace ${AMBARI_VERSION} with a 4-digit version you want the artifacts to be (e.g., -DnewVersion=1.6.1.1)
+Note: Replace `${AMBARI_VERSION}` with a 4-digit version you want the artifacts to be (e.g., -DnewVersion=1.6.1.1)
 
 **Note**: If running into errors while compiling the ambari-metrics package due to missing the artifacts of jms, jmxri, jmxtools:
 
@@ -140,14 +128,14 @@
 
 ## Setting the Version Using Maven
 
-Ambari 2.8+ uses a newer method to update the version when building Ambari. 
+Ambari 2.8+ uses a newer method to update the version when building Ambari.
 
 **RHEL/CentOS 6**:
 
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
- 
+
 mvn -B clean install package rpm:rpm -DskipTests -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
@@ -156,7 +144,7 @@
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
-  
+
 mvn -B clean install package rpm:rpm -DskipTests -Psuse11 -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
@@ -165,36 +153,35 @@
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
-  
+
 mvn -B clean install package jdeb:jdeb -DskipTests -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
 Ambari Server will create following packages
 
-* RPM will be created under `{AMBARI_DIR}`/ambari-server/target/rpm/ambari-server/RPMS/noarch.
+- RPM will be created under `{AMBARI_DIR}`/ambari-server/target/rpm/ambari-server/RPMS/noarch.
 
-* DEB will be created under `{AMBARI_DIR}`/ambari-server/target/
+- DEB will be created under `{AMBARI_DIR}`/ambari-server/target/
 
 Ambari Agent will create following packages
 
-* RPM will be created under `{AMBARI_DIR}`/ambari-agent/target/rpm/ambari-agent/RPMS/x86_64.
+- RPM will be created under `{AMBARI_DIR}`/ambari-agent/target/rpm/ambari-agent/RPMS/x86_64.
 
-* DEB will be created under `{AMBARI_DIR}`/ambari-agent/target
+- DEB will be created under `{AMBARI_DIR}`/ambari-agent/target
 
 Optional parameters:
 
-* -X -e: add these options for more verbose output by Maven. Useful when debugging Maven issues.
+- -X -e: add these options for more verbose output by Maven. Useful when debugging Maven issues.
 
-* -DdefaultStackVersion=STACK-VERSION
-* Sets the default stack and version to be used for installation (e.g., -DdefaultStackVersion=HDP-1.3.0)
-* -DenableExperimental=true
-* Enables experimental features to be available via Ambari Web (default is false)
-* All views can be packaged in RPM by adding _-Dviews_ parameter
+- -DdefaultStackVersion=STACK-VERSION
+- Sets the default stack and version to be used for installation (e.g., -DdefaultStackVersion=HDP-1.3.0)
+- -DenableExperimental=true
+- Enables experimental features to be available via Ambari Web (default is false)
+- All views can be packaged in RPM by adding _-Dviews_ parameter
   - _mvn -B clean install package rpm:rpm -Dviews -DskipTests_
-* Specific views can be built by adding `--projects` parameter to the _-Dviews_
+- Specific views can be built by adding `--projects` parameter to the _-Dviews_
   - _mvn -B clean install package rpm:rpm --projects ambari-web,ambari-project,ambari-views,ambari-admin,contrib/views/files,contrib/views/pig,ambari-server,ambari-agent,ambari-client,ambari-shell -Dviews -DskipTests_
 
-
 _NOTE: Run everything as `root` below._
 
 ## Building Ambari Metrics
diff --git a/docs/ambari-dev/releasing-ambari.md b/docs/ambari-dev/releasing-ambari.md
index 06b1fe1..a025d23 100644
--- a/docs/ambari-dev/releasing-ambari.md
+++ b/docs/ambari-dev/releasing-ambari.md
@@ -1,8 +1,8 @@
 ---
 title: Releasing Ambari
 variables:
-  majorVersion: "2.8"
-  minorVersion: "0"
+  majorVersion: '2.8'
+  minorVersion: '0'
 ---
 
 # Releasing Ambari
@@ -11,13 +11,13 @@
 
 ### [Publishing Maven Artifacts](http://apache.org/dev/publishing-maven-artifacts.html)
 
-* Setting up release signing keys
-* Uploading artifacts to staging and release repositories
+- Setting up release signing keys
+- Uploading artifacts to staging and release repositories
 
 ### [Apache Release Guidelines](http://www.apache.org/legal/release-policy.html)
 
-* Release requirements
-* Process for staging
+- Release requirements
+- Process for staging
 
 ## Preparing for release
 
@@ -68,10 +68,10 @@
 Copy over {username}.asc to {username}@home.apache.org:public_html/~{username}.asc
 Verify URL http://home.apache.org/~{username}/{username}.asc
 Query PGP KeyServer http://pgp.mit.edu:11371/pks/lookup?search=0x{key}&op=vindex
-  
+
 Web of Trust:
 Request others to sign your PGP key.
- 
+
 Login at http://id.apache.org
 Add OpenPGP Fingerprint to your profile
 OpenPGP Public Key Primary Fingerprint: XXXX YYYY ZZZZ ....
@@ -101,7 +101,7 @@
 
 Create a branch for a release using branch-X.Y (ex: branch-2.1) as the name of the branch.
 
-Note: Going forward, we should be creating branch-{majorVersion}.{minorVersion}, so that the same branch can be used for maintenance releases.
+Note: Going forward, we should be creating branch-[majorVersion].[minorVersion], so that the same branch can be used for maintenance releases.
 
 **Checkout the release branch**
 
@@ -152,9 +152,8 @@
 # Review and commit the changes to branch-X.Y
 git commit
 ```
-:::danger
-Ambari 2.7 and Earlier Releases (Deprecated)
-:::
+
+:::danger Ambari 2.7 and Earlier Releases (Deprecated) :::
 
 Older Ambari branches still required that you update every `pom.xml` manually through the below process:
 
@@ -223,7 +222,7 @@
 
 **Setup Build**
 
-Setup Jenkins Job for the new branch on http://builds.apache.org 
+Setup Jenkins Job for the new branch on http://builds.apache.org
 
 ## Creating Release Candidate
 
@@ -301,9 +300,9 @@
 
 **Call for a vote on the dev@ambari.apache.org mailing list with something like this:**
 
-I have created an ambari-** release candidate.
+I have created an ambari-\*\* release candidate.
 
-GIT source tag (r***)
+GIT source tag (r\*\*\*)
 
 ```
 https://git-wip-us.apache.org/repos/asf/ambari/repo?p=ambari.git;a=log;h=refs/tags/release-x.y.z-rc0
@@ -323,8 +322,8 @@
 
 ## Publishing and Announcement
 
-* Login to [https://id.apache.org](https://id.apache.org) and verify the fingerprint of PGP key used to sign above is provided. (gpg --fingerprint)
-* Upload your PGP public key only to _/home/_
+- Login to [https://id.apache.org](https://id.apache.org) and verify the fingerprint of PGP key used to sign above is provided. (gpg --fingerprint)
+- Upload your PGP public key only to _/home/_
 
 Publish the release as below:
 
@@ -395,9 +394,9 @@
 
 - Login to https://reporter.apache.org/addrelease.html?ambari with apache credentials.
 - Fill out the fields:
-    - Committe: ambari
-    - Full version name: 2.2.0
-    - Date of release (YYYY-MM-DD):  2015-12-19
+  - Committee: ambari
+  - Full version name: 2.2.0
+  - Date of release (YYYY-MM-DD): 2015-12-19
 - Submit the data
 - Verify that the submitted data is reflected at https://reporter.apache.org/?ambari
 
@@ -405,4 +404,4 @@
 
 ## Publish Ambari artifacts to Maven central
 
-Please use the following [document](https://docs.google.com/document/d/1RjWQOaTUne6t8DPJorPhOMWAfOb6Xou6sAdHk96CHDw/edit) to publish Ambari artifacts to Maven central.  
+Please use the following [document](https://docs.google.com/document/d/1RjWQOaTUne6t8DPJorPhOMWAfOb6Xou6sAdHk96CHDw/edit) to publish Ambari artifacts to Maven central.
diff --git a/docs/ambari-plugin-contribution/scom/installation.md b/docs/ambari-plugin-contribution/scom/installation.md
index 6032c1d..25ca698 100644
--- a/docs/ambari-plugin-contribution/scom/installation.md
+++ b/docs/ambari-plugin-contribution/scom/installation.md
@@ -1,18 +1,18 @@
 # Installation
 
-## Prerequisite Software 
+## Prerequisite Software
 
 Setting up Ambari SCOM assumes the following prerequisite software:
 
-* Ambari SCOM 1.0
+- Ambari SCOM 1.0
   - Apache Hadoop 1.x cluster (HDFS and MapReduce) 1
-* Ambari SCOM 2.0
+- Ambari SCOM 2.0
   - Apache Hadoop 2.x cluster (HDFS and YARN/MapReduce) 2
-* JDK 1.7
-* Microsoft SQL Server 2012
-* Microsoft JDBC Driver 4.0 for SQL Server 3
-* Microsoft System Center Operations Manager (SCOM) 2012 SP1 or later
-* System Center Monitoring Agent installed on **Watcher Node** 4
+- JDK 1.7
+- Microsoft SQL Server 2012
+- Microsoft JDBC Driver 4.0 for SQL Server 3
+- Microsoft System Center Operations Manager (SCOM) 2012 SP1 or later
+- System Center Monitoring Agent installed on **Watcher Node** 4
 
 1 _Ambari SCOM_ 1.0 has been tested with a Hadoop cluster based on **Hortonworks Data Platform 1.3 for Windows** ("[HDP 1.3 for Windows](http://hortonworks.com/products/releases/hdp-1-3-for-windows/)")
 
@@ -22,7 +22,7 @@
 
 4 See Microsoft TechNet topic for [Managing Discovery and Agents](http://technet.microsoft.com/en-us/library/hh212772.aspx). Minimum Agent requirements _.NET 4_ and _PowerShell 2.0 + 3.0_
 
-## Package Contents 
+## Package Contents
 
 ```
 ├─ ambari-scom- _**version**_.zip
@@ -33,25 +33,23 @@
 └── ambari-scom.msi
 ```
 
-File | Name | Description
------|------|-------------
-server.zip | Server Package | Contains the required software for configuring the Ambari SCOM Server software. 
-metrics-sink.zip | Metrics Sink Package | Contains the required software for manually configuring SQL Server and the Hadoop Metrics Sink.
-ambari-scom.msi | MSI Installer | The Ambari SCOM MSI Installer for configuring the Ambari SCOM Server and Hadoop Metrics Sink
-mp.zip | Management Pack Package | Contains the Ambari SCOM Management Pack software.
+| File | Name | Description |
+| --- | --- | --- |
+| server.zip | Server Package | Contains the required software for configuring the Ambari SCOM Server software. |
+| metrics-sink.zip | Metrics Sink Package | Contains the required software for manually configuring SQL Server and the Hadoop Metrics Sink. |
+| ambari-scom.msi | MSI Installer | The Ambari SCOM MSI Installer for configuring the Ambari SCOM Server and Hadoop Metrics Sink |
+| mp.zip | Management Pack Package | Contains the Ambari SCOM Management Pack software. |
 
 ## Ambari SCOM Server Installation
 
-:::caution
-The **Ambari SCOM Management Pack** must connect to an Ambari SCOM Server to retrieve cluster metrics. Therefore, you need to have an Ambari SCOM Server running in your cluster. If you have already installed your Hadoop cluster (including the Ganglia Service) with Ambari (minimum **Ambari 1.5.1 for SCOM 2.0.0**) and have an Ambari Server already running + managing your Hadoop 1.x cluster, you can use that Ambari Server and point the **Management Pack** that host. You can proceed directly to [Installing Ambari SCOM Management Pack](#id-2installation-mgmtpack) and skip these steps to install an Ambari SCOM Server. If you do not have an Ambari Server running + managing your cluster, you **must** install an Ambari SCOM Server using one of the methods described below.
-:::
+:::caution The **Ambari SCOM Management Pack** must connect to an Ambari SCOM Server to retrieve cluster metrics. Therefore, you need to have an Ambari SCOM Server running in your cluster. If you have already installed your Hadoop cluster (including the Ganglia Service) with Ambari (minimum **Ambari 1.5.1 for SCOM 2.0.0**) and have an Ambari Server already running + managing your Hadoop 1.x cluster, you can use that Ambari Server and point the **Management Pack** to that host. You can proceed directly to [Installing Ambari SCOM Management Pack](#installing-ambari-scom-management-pack) and skip these steps to install an Ambari SCOM Server. If you do not have an Ambari Server running + managing your cluster, you **must** install an Ambari SCOM Server using one of the methods described below. :::
 
 The following methods are available for installing Ambari SCOM Server:
 
-* **Manual Installation** - This installation method requires you to configure the SQL Server database, setup the Ambari SCOM Server and configure the Hadoop Metrics Sink. This provides the most flexible install option based on your environment.
-* **MSI Installation** - This installation method installs the Ambari SCOM Server and configures the Hadoop Metrics Sink on all hosts in the cluster automatically using an MSI Installer. After launching the MSI, you provide information about your SQL Server database and the cluster for the installer to handle configuration. 
+- **Manual Installation** - This installation method requires you to configure the SQL Server database, setup the Ambari SCOM Server and configure the Hadoop Metrics Sink. This provides the most flexible install option based on your environment.
+- **MSI Installation** - This installation method installs the Ambari SCOM Server and configures the Hadoop Metrics Sink on all hosts in the cluster automatically using an MSI Installer. After launching the MSI, you provide information about your SQL Server database and the cluster for the installer to handle configuration.
 
-## Manual Installation 
+## Manual Installation
 
 ### Configuring SQL Server
 
@@ -63,11 +61,9 @@
 
 5. Create the Ambari SCOM database schema by running the `Hadoop-Metrics-SQLServer-CREATE.ddl` script.
 
-:::info
-The Hadoop Metrics DDL script will create a database called "HadoopMetrics".
-:::
+:::info The Hadoop Metrics DDL script will create a database called "HadoopMetrics". :::
 
-### Configuring Hadoop Metrics Sink 
+### Configuring Hadoop Metrics Sink
 
 #### Preparing the Metrics Sink
 
@@ -75,8 +71,7 @@
 
 2. Obtain the _Microsoft JDBC Driver 4.0 for SQL Server_ `sqljdbc4.jar` file.
 
-3. Copy `sqljdbc4.jar` and `metrics-sink-version.jar` to each host in the cluster. For example, copy to `C:\Ambari\metrics-sink-version.jar` and `C:\Ambari\sqljdbc4.jar`
-on each host.
+3. Copy `sqljdbc4.jar` and `metrics-sink-version.jar` to each host in the cluster. For example, copy to `C:\Ambari\metrics-sink-version.jar` and `C:\Ambari\sqljdbc4.jar` on each host.
 
 #### Setup Hadoop Metrics2 Interface
 
@@ -95,30 +90,27 @@
 reducetask.sink.sql.databaseUrl=jdbc:sqlserver://[server]:[port];databaseName=HadoopMetrics;user=[user];password=[password]
 ```
 
-:::info
-_Where:_
+:::info _Where:_
 
-* _server = the SQL Server hostname_
-* _port = the SQL Server port (for example, 1433)_
-* _user = the SQL Server user (for example, sa)_
-* _password = the SQL Server password (for example, BigData1)_
-:::
+- _server = the SQL Server hostname_
+- _port = the SQL Server port (for example, 1433)_
+- _user = the SQL Server user (for example, sa)_
+- _password = the SQL Server password (for example, BigData1)_ :::
 
 1. Update the Java classpath for each Hadoop service to include the `metrics-sink-<strong><em>version</em></strong>.jar` and `sqljdbc4.jar` files.
 
+   - Example: Updating the Java classpath for _HDP for Windows_ clusters
 
-    - Example: Updating the Java classpath for _HDP for Windows_ clusters
+     The `service.xml` files will be located in the `C:\hadoop\install\dir\bin` folder of each host in the cluster. The Java classpath is specified for each service in the `<arguments>` element of the `service.xml` file. For example, to update the Java classpath for the `NameNode` component, edit the `C:\hadoop\bin\namenode.xml` file.
 
-      The `service.xml` files will be located in the `C:\hadoop\install\dir\bin` folder of each host in the cluster. The Java classpath is specified for each service in the `<arguments>` element of the `service.xml` file. For example, to update the Java classpath for the `NameNode` component, edit the `C:\hadoop\bin\namenode.xml` file.
+     ```
+     ...
 
-        ```
-        ...
-        
-        ... -classpath ...;C:\Ambari\metrics-sink-1.5.1.2.0.0.0-673.jar;C:\Ambari\sqljdbc4.jar ...
-        
-        ...
-        
-        ```
+     ... -classpath ...;C:\Ambari\metrics-sink-1.5.1.2.0.0.0-673.jar;C:\Ambari\sqljdbc4.jar ...
+
+     ...
+
+     ```
 
 2. Restart Hadoop for these changes to take affect.
 
@@ -129,9 +121,8 @@
 ```sql
 select * from HadoopMetrics.dbo.MetricRecord
 ```
-:::info
-In the above SQL statement, `HadoopMetrics` is the database name.
-:::
+
+:::info In the above SQL statement, `HadoopMetrics` is the database name. :::
 
 ### Installing and Configuring Ambari SCOM Server
 
@@ -158,34 +149,30 @@
 scom.sink.db.url=jdbc:sqlserver://[server]:[port];databaseName=HadoopMetrics;user=[user];password=[password]
 ```
 
-:::info
-_Where:_
-  - _server = the SQL Server hostname_
-  - _port = the SQL Server port (for example, 1433)_
-  - _user = the SQL Server user (for example, sa)_
-  - _password = the SQL Server password (for example, BigData1)_
-:::
+:::info _Where:_
 
-6. Run the `org.apache.ambari.scom.AmbariServer` class from the Java command line to start the Ambari SCOM Server. 
+- _server = the SQL Server hostname_
+- _port = the SQL Server port (for example, 1433)_
+- _user = the SQL Server user (for example, sa)_
+- _password = the SQL Server password (for example, BigData1)_ :::
 
-:::info
-Be sure to include the following in the classpath:
-  - `ambari-scom-server-version.jar` file
-  - configuration folder containing the Ambari SCOM configuration files
-  - lib folder containing the Ambari SCOM dependencies
-  - folder containing the `clusterproperties.txt` file from the Hadoop install. For example, `c:\hadoop\install\dir`
-  - `sqljdbc4.jar` SQLServer JDBC Driver file
-::
-  
+6. Run the `org.apache.ambari.scom.AmbariServer` class from the Java command line to start the Ambari SCOM Server.
+
+:::info Be sure to include the following in the classpath:
+
+- `ambari-scom-server-version.jar` file
+- configuration folder containing the Ambari SCOM configuration files
+- lib folder containing the Ambari SCOM dependencies
+- folder containing the `clusterproperties.txt` file from the Hadoop install. For example, `c:\hadoop\install\dir`
+- `sqljdbc4.jar` SQLServer JDBC Driver file :::
+
 For example:
 
 ```bash
 java -server -XX:NewRatio=3 -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -XX:CMSInitiatingOccupancyFraction=60 -Xms512m -Xmx2048m -cp "c:\ambari-scom\server\conf;c:\ambari-scom\server\lib\*;c:\jdbc\sqljdbc4.jar;c:\hadoop\install\dir;c:\ambari-scom\server\ambari-scom-server-1.5.1.2.0.0.0-673.jar" org.apache.ambari.scom.AmbariServer
 ```
 
-:::info
-In the above command, be sure to replace the Ambari SCOM version in the `ambari-scom-server-version.jar` and replace `c:\hadoop\install\dir` with the folder containing the `clusterproperties.txt` file.
-:::
+:::info In the above command, be sure to replace the Ambari SCOM version in the `ambari-scom-server-version.jar` and replace `c:\hadoop\install\dir` with the folder containing the `clusterproperties.txt` file. :::
 
 #### Verify the Server API
 
@@ -194,6 +181,7 @@
 ```
 http://[ambari-scom-server]:8080/api/v1/clusters
 ```
+
 2. Verify that metrics are being reported.
 
 ```
@@ -217,33 +205,29 @@
 
 3. Run the `ambari-scom.msi` installer. The "Ambari SCOM Setup" dialog appears:
 
-     ![](./imgs/ambari-scom-msi2.png)
+   ![](./imgs/ambari-scom-msi2.png)
 
-4. Provide the following information: 
+4. Provide the following information:
 
-Field | Description
-------|------------
-Ambari SCOM package directory | The directory where the installer will place the Ambari SCOM Server packages. For example: C:\Ambari
-SQL Server hostname | The hostname of the SQL Server instance for Ambari SCOM Server to use to store Hadoop metrics.
-SQL Server port | The port of the SQL Server instance.
-SQL Server login | The login username.
-SQL Server password | The login password
-Path to SQL Server JDBC Driver (sqljdbc4.jar) | The path to the JDBC Driver JAR file.
-Path to the cluster layout file (clusterproperties.txt) | The path to the cluster layout properties file.
+| Field | Description |
+| --- | --- |
+| Ambari SCOM package directory | The directory where the installer will place the Ambari SCOM Server packages. For example: C:\Ambari |
+| SQL Server hostname | The hostname of the SQL Server instance for Ambari SCOM Server to use to store Hadoop metrics. |
+| SQL Server port | The port of the SQL Server instance. |
+| SQL Server login | The login username. |
+| SQL Server password | The login password |
+| Path to SQL Server JDBC Driver (sqljdbc4.jar) | The path to the JDBC Driver JAR file. |
+| Path to the cluster layout file (clusterproperties.txt) | The path to the cluster layout properties file. |
 
 5. You can optionally select to Start Services
 6. Click Install
 7. After completion, links are created on the desktop to "Start Ambari SCOM Server", "Browse Ambari API" and "Browse Ambari API Metrics". After starting the Ambari SCOM Server, browse the API and Metrics to confirm the server is working properly.
 
-:::info
-The MSI installer installation log can be found at `C:\AmbariInstallFiles\AmbariSetupTools\ambari.winpkg.install.log`
-:::
+:::info The MSI installer installation log can be found at `C:\AmbariInstallFiles\AmbariSetupTools\ambari.winpkg.install.log` :::
 
 ### Installing Ambari SCOM Management Pack
 
-:::info
-Before installing the Management pack, be sure to install the Ambari SCOM Server using the Ambari SCOM Server Installation instructions.
-:::
+:::info Before installing the Management pack, be sure to install the Ambari SCOM Server using the Ambari SCOM Server Installation instructions. :::
 
 #### Import the Management Pack
 
@@ -272,14 +256,13 @@
 Ambari.SCOM.Management.mpb
 Ambari.SCOM.Presentation.mpb
 ```
+
 10. Click "Open"
 11. Review the Import list and click "Install".
 
 12. The Ambari SCOM Management Pack installation will start.
 
-:::info
-The Ambari SCOM package also includes `AmbariSCOMManagementPack.msi` which is an alternative packaging of the `mp.zip`. This MSI is being made in **beta** form in this release.
-:::
+:::info The Ambari SCOM package also includes `AmbariSCOMManagementPack.msi` which is an alternative packaging of the `mp.zip`. This MSI is being made in **beta** form in this release. :::
 
 #### Create Run As Account
 
@@ -317,9 +300,7 @@
 http://[ambari-scom-server]:8080/api/
 ```
 
-:::info
-In the above Ambari URI, `ambari-scom-server` is the Ambari SCOM Server.
-:::
+:::info In the above Ambari URI, `ambari-scom-server` is the Ambari SCOM Server. :::
 
 6. Select the Run As Account that you created in Create Run As Account.
 
@@ -337,4 +318,4 @@
 
 ## Monitoring Scenarios
 
-[Monitoring Scenarios](https://cwiki.apache.org/confluence/display/AMBARI/3.+Monitoring+Scenarios)
\ No newline at end of file
+[Monitoring Scenarios](https://cwiki.apache.org/confluence/display/AMBARI/3.+Monitoring+Scenarios)
diff --git a/package.json b/package.json
index fdba450..03727c6 100644
--- a/package.json
+++ b/package.json
@@ -40,12 +40,12 @@
     "@docusaurus/preset-classic": "3.7.0",
     "@mdx-js/react": "^3.0.0",
     "clsx": "^2.0.0",
+    "prism-react-renderer": "^2.3.0",
     "docusaurus-plugin-less": "^2.0.2",
     "less": "^4.2.0",
     "less-loader": "^11.1.3",
-    "prism-react-renderer": "^2.3.0",
-    "react": "^18.2.0",
-    "react-dom": "^18.2.0",
+    "react": "^19.0.0",
+    "react-dom": "^19.0.0",
     "react-particles": "^2.12.2",
     "tsparticles": "^2.12.0"
   },
@@ -56,7 +56,7 @@
     "@docusaurus/types": "3.7.0",
     "cz-conventional-changelog": "^3.3.0",
     "standard-version": "^9.5.0",
-    "typescript": "^5.2.2"
+    "typescript": "~5.6.2"
   },
   "browserslist": {
     "production": [
diff --git a/versioned_docs/version-2.7.5/ambari-design/kerberos/kerberos_descriptor.md b/versioned_docs/version-2.7.5/ambari-design/kerberos/kerberos_descriptor.md
index 2dd4798..871ca37 100644
--- a/versioned_docs/version-2.7.5/ambari-design/kerberos/kerberos_descriptor.md
+++ b/versioned_docs/version-2.7.5/ambari-design/kerberos/kerberos_descriptor.md
@@ -1,6 +1,7 @@
 ---
 title: The Kerberos Descriptor
 ---
+
 <!---
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements. See the NOTICE file distributed with
@@ -18,19 +19,18 @@
 limitations under the License.
 -->
 
-
 - [Introduction](index.md)
 - [The Kerberos Descriptor](#the-kerberos-descriptor)
   - [Components of a Kerberos Descriptor](#components-of-a-kerberos-descriptor)
     - [Stack-level Properties](#stack-level-properties)
     - [Stack-level Identities](#stack-level-identities)
     - [Stack-level Auth-to-local-properties](#stack-level-auth-to-local-properties)
-    - [Stack-level Configurations](#stack-level-configuratons)
+    - [Stack-level Configurations](#stack-level-configurations)
     - [Services](#services)
     - [Service-level Identities](#service-level-identities)
     - [Service-level Auth-to-local-properties](#service-level-auth-to-local-properties)
     - [Service-level Configurations](#service-level-configurations)
-    - [Components](#service-components)
+    - [Components](#components)
     - [Component-level Identities](#component-level-identities)
     - [Component-level Auth-to-local-properties](#component-level-auth-to-local-properties)
     - [Component-level Configurations](#component-level-configurations)
@@ -51,25 +51,15 @@
 
 ## The Kerberos Descriptor
 
-The Kerberos Descriptor is a JSON-formatted text file containing information needed by Ambari to enable
-or disable Kerberos for a stack and its services. This file must be named **_kerberos.json_** and should
-be in the root directory of the relevant stack or service definition. Kerberos Descriptors are meant to
-be hierarchical such that details in the stack-level descriptor can be overwritten (or updated) by details
-in the service-level descriptors.
+The Kerberos Descriptor is a JSON-formatted text file containing information needed by Ambari to enable or disable Kerberos for a stack and its services. This file must be named **_kerberos.json_** and should be in the root directory of the relevant stack or service definition. Kerberos Descriptors are meant to be hierarchical such that details in the stack-level descriptor can be overwritten (or updated) by details in the service-level descriptors.
 
-For the services in a stack to be Kerberized, there must be a stack-level Kerberos Descriptor. This
-ensures that even if a common service has a Kerberos Descriptor, it may not be Kerberized unless the
-relevant stack indicates that supports Kerberos by having a stack-level Kerberos Descriptor.
+For the services in a stack to be Kerberized, there must be a stack-level Kerberos Descriptor. This ensures that even if a common service has a Kerberos Descriptor, it may not be Kerberized unless the relevant stack indicates that it supports Kerberos by having a stack-level Kerberos Descriptor.
 
-For a component of a service to be Kerberized, there must be an entry for it in its containing service's
-service-level descriptor. This allows for some of a services' components to be managed and other
-components of that service to be ignored by the automated Kerberos facility.
+For a component of a service to be Kerberized, there must be an entry for it in its containing service's service-level descriptor. This allows for some of a service's components to be managed and other components of that service to be ignored by the automated Kerberos facility.
 
-Kerberos Descriptors are inherited from the base stack or service, but may be overridden as a full
-descriptor - partial descriptors are not allowed.
+Kerberos Descriptors are inherited from the base stack or service, but may be overridden as a full descriptor - partial descriptors are not allowed.
 
-A complete descriptor (which is built using the stack-level descriptor, the service-level descriptors,
-and any updates from user input) has the following structure:
+A complete descriptor (which is built using the stack-level descriptor, the service-level descriptors, and any updates from user input) has the following structure:
 
 - Stack-level Properties
 - Stack-level Identities
@@ -84,9 +74,7 @@
     - Component-level Auth-to-local-properties
     - Component-level Configurations
 
-Each level of the descriptor inherits the data from its parent. This data, however, may be overridden
-if necessary. For example, a component will inherit the configurations and identities of its container
-service; which in turn inherits the configurations and identities from the stack.
+Each level of the descriptor inherits the data from its parent. This data, however, may be overridden if necessary. For example, a component will inherit the configurations and identities of its container service; which in turn inherits the configurations and identities from the stack.
 
 <a name="components-of-a-kerberos-descriptor"></a>
 
@@ -96,13 +84,9 @@
 
 #### Stack-level Properties
 
-Stack-level properties is an optional set of name/value pairs that can be used in variable replacements.
-For example, if a property named "**_property1_**" exists with the value of "**_value1_**", then any instance of
-"**_${property1}_**" within a configuration property name or configuration property value will be replaced
-with "**_value1_**".
+Stack-level properties is an optional set of name/value pairs that can be used in variable replacements. For example, if a property named `**_property1_**` exists with the value of `**_value1_**`, then any instance of `**_${property1}_**` within a configuration property name or configuration property value will be replaced with `**_value1_**`.
 
-This property is only relevant in the stack-level Kerberos Descriptor and may not be overridden by
-lower-level descriptors.
+This property is only relevant in the stack-level Kerberos Descriptor and may not be overridden by lower-level descriptors.
 
 See [properties](#properties).
 
@@ -110,15 +94,7 @@
 
 #### Stack-level Identities
 
-Stack-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are common among all services in the stack. An example of such an identity is the
-Ambari smoke test user, which is used by all services to perform service check operations. Service-
-and component-level identities may reference (and specialize) stack-level identities using the
-identity’s name with a forward slash (/) preceding it. For example if there was a stack-level identity
-with the name "smokeuser", then a service or a component may create an identity block that references
-and specializes it by declaring a "**_reference_**" property and setting it to "/smokeuser".  Within
-this identity block details of the identity may be and overwritten as necessary. This does not alter
-the stack-level identity, it essentially creates a copy of it and updates the copy's properties.
+Stack-level identities is an optional identities block containing a list of zero or more identity descriptors that are common among all services in the stack. An example of such an identity is the Ambari smoke test user, which is used by all services to perform service check operations. Service- and component-level identities may reference (and specialize) stack-level identities using the identity’s name with a forward slash (/) preceding it. For example if there was a stack-level identity with the name "smokeuser", then a service or a component may create an identity block that references and specializes it by declaring a "**_reference_**" property and setting it to "/smokeuser". Within this identity block details of the identity may be overwritten as necessary. This does not alter the stack-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 See [identities](#identities).
 
@@ -126,9 +102,7 @@
 
 #### Stack-level Auth-to-local-properties
 
-Stack-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Stack-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auto-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -136,11 +110,7 @@
 
 #### Stack-level Configurations
 
-Stack-level configurations is an optional configurations block containing a list of zero or more
-configuration descriptors that are common among all services in the stack. Configuration descriptors
-are overridable due to the structure of the data.  However, overriding configuration properties may
-create undesired behavior since it is not known until after the Kerberization process is complete
-what value a property will have.
+Stack-level configurations is an optional configurations block containing a list of zero or more configuration descriptors that are common among all services in the stack. Configuration descriptors are overridable due to the structure of the data. However, overriding configuration properties may create undesired behavior since it is not known until after the Kerberization process is complete what value a property will have.
 
 See [configurations](#configurations).
 
@@ -148,8 +118,7 @@
 
 #### Services
 
-Services is a list of zero or more service descriptors. A stack-level Kerberos Descriptor should not
-list any services; however a service-level Kerberos Descriptor should contain at least one.
+Services is a list of zero or more service descriptors. A stack-level Kerberos Descriptor should not list any services; however a service-level Kerberos Descriptor should contain at least one.
 
 See [services](#services).
 
@@ -157,16 +126,9 @@
 
 #### Service-level Identities
 
-Service-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are common among all components of the service. Component-level identities may
-reference (and specialize) service-level identities by specifying a relative or an absolute path
-to it.
+Service-level identities is an optional identities block containing a list of zero or more identity descriptors that are common among all components of the service. Component-level identities may reference (and specialize) service-level identities by specifying a relative or an absolute path to it.
 
-For example if there was a service-level identity with the name "service_identity", then a child
-component may create an identity block that references and specializes it by setting its "reference"
-attribute to "../service_identity" or "/service_name/service_identity" and overriding any values as
-necessary. This does not override the service-level identity, it essentially creates a copy of it and
-updates the copy's properties.
+For example if there was a service-level identity with the name "service_identity", then a child component may create an identity block that references and specializes it by setting its "reference" attribute to "../service_identity" or "/service_name/service_identity" and overriding any values as necessary. This does not override the service-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 ##### Examples
 
@@ -186,8 +148,7 @@
 }
 ```
 
-**Note**: By using the absolute path to an identity, any service-level identity may be referenced by
-any other service or component.
+**Note**: By using the absolute path to an identity, any service-level identity may be referenced by any other service or component.
 
 See [identities](#identities).
 
@@ -195,9 +156,7 @@
 
 #### Service-level Auth-to-local-properties
 
-Service-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Service-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auth-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -205,11 +164,7 @@
 
 #### Service-level Configurations
 
-Service-level configurations is an optional configurations block listing of zero or more configuration
-descriptors that are common among all components within a service. Configuration descriptors may be
-overridden due to the structure of the data. However, overriding configuration properties may create
-undesired behavior since it is not known until after the Kerberization process is complete what value
-a property will have.
+Service-level configurations is an optional configurations block listing zero or more configuration descriptors that are common among all components within a service. Configuration descriptors may be overridden due to the structure of the data. However, overriding configuration properties may create undesired behavior since it is not known until after the Kerberization process is complete what value a property will have.
 
 See [configurations](#configurations).
 
@@ -225,11 +180,7 @@
 
 #### Component-level Identities
 
-Component-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are specific to the component. A Component-level identity may be referenced
-(and specialized) by using the absolute path to it (`/service_name/component_name/identity_name`).
-This does not override the component-level identity, it essentially creates a copy of it and updates
-the copy's properties.
+Component-level identities is an optional identities block containing a list of zero or more identity descriptors that are specific to the component. A Component-level identity may be referenced (and specialized) by using the absolute path to it (`/service_name/component_name/identity_name`). This does not override the component-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 See [identities](#identities).
 
@@ -237,9 +188,7 @@
 
 #### Component-level Auth-to-local-properties
 
-Component-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Component-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auth-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -247,19 +196,17 @@
 
 #### Component-level Configurations
 
-Component-level configurations is an optional configurations block listing zero or more configuration
-descriptors that are specific to the component.
+Component-level configurations is an optional configurations block listing zero or more configuration descriptors that are specific to the component.
 
 See [configurations](#configurations).
 
-### Descriptor Specifications
+### Kerberos Descriptor Specifications
 
 <a name="properties"></a>
 
 #### properties
 
-The `properties` block is only valid in the service-level Kerberos Descriptor file. This block is
-a set of name/value pairs as follows:
+The `properties` block is only valid in the service-level Kerberos Descriptor file. This block is a set of name/value pairs as follows:
 
 ```
 "properties" : {
@@ -273,14 +220,9 @@
 
 #### auth-to-local-properties
 
-The `auth-to-local-properties` block is valid in the stack-, service-, and component-level
-descriptors. This block is a list of configuration specifications
-(`config-type/property_name[|concatenation_scheme]`) indicating which properties contain
-auth-to-local rules that should be dynamically updated based on the identities used within the
-Kerberized cluster.
+The `auth-to-local-properties` block is valid in the stack-, service-, and component-level descriptors. This block is a list of configuration specifications (`config-type/property_name[|concatenation_scheme]`) indicating which properties contain auth-to-local rules that should be dynamically updated based on the identities used within the Kerberized cluster.
 
-The specification optionally declares the concatenation scheme to use to append
-the rules into a rule set value. If specified one of the following schemes may be set:
+The specification optionally declares the concatenation scheme to use to append the rules into a rule set value. If specified one of the following schemes may be set:
 
 - **`new_lines`** - rules in the rule set are separated by a new line (`\n`)
 - **`new_lines_escaped`** - rules in the rule set are separated by a `\` and a new line (`\n`)
@@ -300,15 +242,9 @@
 
 #### configurations
 
-A `configurations` block may exist in stack-, service-, and component-level descriptors.
-This block is a list of one or more configuration blocks containing a single structure named using
-the configuration type and containing values for each relevant property.
+A `configurations` block may exist in stack-, service-, and component-level descriptors. This block is a list of one or more configuration blocks containing a single structure named using the configuration type and containing values for each relevant property.
 
-Each property name and value may be a concrete value or contain variables to be replaced using values
-from the stack-level `properties` block or any available configuration. Properties from the `properties`
-block are referenced by name (`${property_name}`), configuration properties are reference by
-configuration specification (`${config-type/property_name}`) and kerberos principals are referenced by the principal path
-(`principals/SERVICE/COMPONENT/principal_name`).
+Each property name and value may be a concrete value or contain variables to be replaced using values from the stack-level `properties` block or any available configuration. Properties from the `properties` block are referenced by name (`${property_name}`), configuration properties are referenced by configuration specification (`${config-type/property_name}`) and kerberos principals are referenced by the principal path (`principals/SERVICE/COMPONENT/principal_name`).
 
 ```
 "configurations" : [
@@ -329,8 +265,7 @@
 ]
 ```
 
-If `cluster-env/smokuser` was `"ambari-qa"` and realm was `"EXAMPLE.COM"`, the above block would
-effectively be translated to
+If `cluster-env/smokuser` was `"ambari-qa"` and realm was `"EXAMPLE.COM"`, the above block would effectively be translated to
 
 ```
 "configurations" : [
@@ -355,25 +290,11 @@
 
 #### identities
 
-An `identities` descriptor may exist in stack-, service-, and component-level descriptors. This block
-is a list of zero or more identity descriptors. Each identity descriptor is a block containing a `name`,
-an optional `reference` identifier, an optional `principal` descriptor, and an optional `keytab`
-descriptor.
+An `identities` descriptor may exist in stack-, service-, and component-level descriptors. This block is a list of zero or more identity descriptors. Each identity descriptor is a block containing a `name`, an optional `reference` identifier, an optional `principal` descriptor, and an optional `keytab` descriptor.
 
-The `name` property of an `identity` descriptor should be a concrete name that is unique with in its
-`local` scope (stack, service, or component). However, to maintain backwards-compatibility with
-previous versions of Ambari, it may be a reference identifier to some other identity in the
-Kerberos Descriptor. This feature is deprecated and may not be available in future versions of Ambari.
+The `name` property of an `identity` descriptor should be a concrete name that is unique within its `local` scope (stack, service, or component). However, to maintain backwards-compatibility with previous versions of Ambari, it may be a reference identifier to some other identity in the Kerberos Descriptor. This feature is deprecated and may not be available in future versions of Ambari.
 
-The `reference` property of an `identitiy` descriptor is optional. If it exists, it indicates that the
-properties from referenced identity is to be used as the base for the current identity and any properties
-specified in the local identity block overrides the base data. In this scenario, the base data is copied
-to the local identities and therefore changes are realized locally, not globally. Referenced identities
-may be hierarchical, so a referenced identity may reference another identity, and so on.  Because of
-this, care must be taken not to create cyclic references. Reference values must be in the form of a
-relative or absolute _path_ to the referenced identity descriptor. Relative _paths_ start with a `../`
-and may be specified in component-level identity descriptors to reference an identity descriptor
-in the parent service. Absolute _paths_ start with a `/` and may be specified at any level as follows:
+The `reference` property of an `identity` descriptor is optional. If it exists, it indicates that the properties from the referenced identity are to be used as the base for the current identity and any properties specified in the local identity block override the base data. In this scenario, the base data is copied to the local identities and therefore changes are realized locally, not globally. Referenced identities may be hierarchical, so a referenced identity may reference another identity, and so on. Because of this, care must be taken not to create cyclic references. Reference values must be in the form of a relative or absolute _path_ to the referenced identity descriptor. Relative _paths_ start with a `../` and may be specified in component-level identity descriptors to reference an identity descriptor in the parent service. Absolute _paths_ start with a `/` and may be specified at any level as follows:
 
 - **Stack-level** identity reference: `/identitiy_name`
 - **Service-level** identity reference: `/SERVICE_NAME/identitiy_name`
@@ -407,31 +328,15 @@
 
 #### principal
 
-The `principal` block is an optional block inside an `identity` descriptor block. It declares the
-details about the identity’s principal, including the principal’s `value`, the `type` (user or service),
-the relevant `configuration` property, and a local username mapping. All properties are optional; however
-if no base or default value is available (via the parent identity's `reference` value) for all properties,
-the principal may be ignored.
+The `principal` block is an optional block inside an `identity` descriptor block. It declares the details about the identity’s principal, including the principal’s `value`, the `type` (user or service), the relevant `configuration` property, and a local username mapping. All properties are optional; however if no base or default value is available (via the parent identity's `reference` value) for all properties, the principal may be ignored.
 
-The `value` property of the principal is expected to be the normalized principal name, including the
-principal’s components and realm. In most cases, the realm should be specified using the realm variable
-(`${realm}` or `${kerberos-env/realm}`). Also, in the case of a service principal, "`_HOST`" should be
-used to represent the relevant hostname.  This value is typically replaced on the agent side by either
-the agent-side scripts or the services themselves to be the hostname of the current host. However the
-built-in hostname variable (`${hostname}`) may be used if "`_HOST`" replacement on the agent-side is
-not available for the service. Examples: `smokeuser@${realm}`, `service/_HOST@${realm}`.
+The `value` property of the principal is expected to be the normalized principal name, including the principal’s components and realm. In most cases, the realm should be specified using the realm variable (`${realm}` or `${kerberos-env/realm}`). Also, in the case of a service principal, "`_HOST`" should be used to represent the relevant hostname. This value is typically replaced on the agent side by either the agent-side scripts or the services themselves to be the hostname of the current host. However the built-in hostname variable (`${hostname}`) may be used if "`_HOST`" replacement on the agent-side is not available for the service. Examples: `smokeuser@${realm}`, `service/_HOST@${realm}`.
 
-The `type` property of the principal may be either `user` or `service`. If not specified, the type is
-assumed to be `user`. This value dictates how the identity is to be created in the KDC or Active Directory.
-It is especially important in the Active Directory case due to how accounts are created. It also,
-indicates to Ambari how to handle the principal and relevant keytab file reguarding the user interface
-behavior and data caching.
+The `type` property of the principal may be either `user` or `service`. If not specified, the type is assumed to be `user`. This value dictates how the identity is to be created in the KDC or Active Directory. It is especially important in the Active Directory case due to how accounts are created. It also indicates to Ambari how to handle the principal and relevant keytab file regarding the user interface behavior and data caching.
 
-The `configuration` property is an optional configuration specification (`config-type/property_name`)
-that is to be set to this principal's `value` (after its variables have been replaced).
+The `configuration` property is an optional configuration specification (`config-type/property_name`) that is to be set to this principal's `value` (after its variables have been replaced).
 
-The `local_username` property, if supplied, indicates which local user account to use when generating
-auth-to-local rules for this identity. If not specified, no explicit auth-to-local rule will be generated.
+The `local_username` property, if supplied, indicates which local user account to use when generating auth-to-local rules for this identity. If not specified, no explicit auth-to-local rule will be generated.
 
 ```
 "principal" : {
@@ -454,24 +359,15 @@
 
 #### keytab
 
-The `keytab` block is an optional block inside an `identity` descriptor block. It describes how to
-create and store the relevant keytab file.  This block declares the keytab file's path in the local
-filesystem of the destination host, the permissions to assign to that file, and the relevant
-configuration property.
+The `keytab` block is an optional block inside an `identity` descriptor block. It describes how to create and store the relevant keytab file. This block declares the keytab file's path in the local filesystem of the destination host, the permissions to assign to that file, and the relevant configuration property.
 
-The `file` property declares an absolute path to use to store the keytab file when distributing to
-relevant hosts. If this is not supplied, the keytab file will not be created.
+The `file` property declares an absolute path to use to store the keytab file when distributing to relevant hosts. If this is not supplied, the keytab file will not be created.
 
-The `owner` property is an optional block indicating the local user account to assign as the owner of
-the file and what access  (`"rw"` - read/write; `"r"` - read-only) should
-be granted to that user. By default, the owner will be given read-only access.
+The `owner` property is an optional block indicating the local user account to assign as the owner of the file and what access (`"rw"` - read/write; `"r"` - read-only) should be granted to that user. By default, the owner will be given read-only access.
 
-The `group` property is an optional block indicating which local group to assigned as the group owner
-of the file and what access (`"rw"` - read/write; `"r"` - read-only; `“”` - no access) should be granted
-to local user accounts in that group. By default, the group will be given no access.
+The `group` property is an optional block indicating which local group to assign as the group owner of the file and what access (`"rw"` - read/write; `"r"` - read-only; `“”` - no access) should be granted to local user accounts in that group. By default, the group will be given no access.
 
-The `configuration` property is an optional configuration specification (`config-type/property_name`)
-that is to be set to the path of this keytabs file (after any variables have been replaced).
+The `configuration` property is an optional configuration specification (`config-type/property_name`) that is to be set to the path of this keytab file (after any variables have been replaced).
 
 ```
 "keytab" : {
@@ -492,11 +388,9 @@
 
 #### services
 
-A `services` block may exist in the stack-level and the service-level Kerberos Descriptor file.
-This block is a list of zero or more service descriptors to add to the Kerberos Descriptor.
+A `services` block may exist in the stack-level and the service-level Kerberos Descriptor file. This block is a list of zero or more service descriptors to add to the Kerberos Descriptor.
 
-Each service block contains a service `name`, and optionals `identities`,  `auth_to_local_properties`
-`configurations`, and `components` blocks.
+Each service block contains a service `name`, and optional `identities`, `auth_to_local_properties`, `configurations`, and `components` blocks.
 
 ```
 "services": [
@@ -538,10 +432,7 @@
 
 #### components
 
-A `components` block may exist within a `service` descriptor block. This block is a list of zero or
-more component descriptors belonging to the containing service descriptor. Each component descriptor
-is a block containing a component `name`, and optional `identities`, `auth_to_local_properties`,
-and `configurations` blocks.
+A `components` block may exist within a `service` descriptor block. This block is a list of zero or more component descriptors belonging to the containing service descriptor. Each component descriptor is a block containing a component `name`, and optional `identities`, `auth_to_local_properties`, and `configurations` blocks.
 
 ```
 "components": [
@@ -566,8 +457,8 @@
 ### Examples
 
 #### Example Stack-level Kerberos Descriptor
-The following example is annotated for descriptive purposes. The annotations are not valid in a real
-JSON-formatted file.
+
+The following example is annotated for descriptive purposes. The annotations are not valid in a real JSON-formatted file.
 
 ```
 {
@@ -661,8 +552,8 @@
 ```
 
 #### Example Service-level Kerberos Descriptor
-The following example is annotated for descriptive purposes. The annotations are not valid in a real
-JSON-formatted file.
+
+The following example is annotated for descriptive purposes. The annotations are not valid in a real JSON-formatted file.
 
 ```
 {
diff --git a/versioned_docs/version-2.7.5/ambari-design/stack-and-services/faq.md b/versioned_docs/version-2.7.5/ambari-design/stack-and-services/faq.md
index d19f838..06e68f2 100644
--- a/versioned_docs/version-2.7.5/ambari-design/stack-and-services/faq.md
+++ b/versioned_docs/version-2.7.5/ambari-design/stack-and-services/faq.md
@@ -6,18 +6,17 @@
 
 Ambari goes property by property and merge them from parent to child. So if you remove a category for example from the child it will be inherited from parent, that goes for pretty much all properties.
 
-So, the question is how do we tackle existence of a property in both parent and child. Here, most of the decision still follow same paradigm as take the child value instead of parent and every property in parent, not explicitly deleted from child using a marker like 
+So, the question is how do we tackle existence of a property in both parent and child. Here, most of the decision still follow same paradigm as take the child value instead of parent and every property in parent, not explicitly deleted from child using a marker like
 
+- For config-dependencies, we take all or nothing approach, if this property exists in child use it and all of its children else take it from parent.
 
-* For config-dependencies, we take all or nothing approach, if this property exists in child use it and all of its children else take it from parent.
+- The custom commands are merged based on names, such that merged definition is a union of commands with child commands with same name overriding those from the parent.
 
-* The custom commands are merged based on names, such that merged definition is a union of commands with child commands with same name overriding those fro parent.
-
-* Cardinality is overwritten by a child or take from the parent if child has not provided one.
+- Cardinality is overwritten by a child or taken from the parent if child has not provided one.
 
 You could look at this method for more details: `org.apache.ambari.server.api.util.StackExtensionHelper#mergeServices`
 
-For more information see the [Service Inheritance](./custom-services.md#Service20%Inheritance) wiki page.
+For more information see the [Service Inheritance](./custom-services.md#service-inheritance) wiki page.
 
 **If a component is missing in the new definition but is present in the parent, does it get inherited?**
 
@@ -26,4 +25,3 @@
 **Configuration dependencies for the service -- are they overwritten or merged?**
 
 Overwritten.
-
diff --git a/versioned_docs/version-2.7.5/ambari-design/stack-and-services/stack-inheritance.md b/versioned_docs/version-2.7.5/ambari-design/stack-and-services/stack-inheritance.md
index 8d5184d..5fd3ffd 100644
--- a/versioned_docs/version-2.7.5/ambari-design/stack-and-services/stack-inheritance.md
+++ b/versioned_docs/version-2.7.5/ambari-design/stack-and-services/stack-inheritance.md
@@ -1,4 +1,3 @@
-
 # Stack Inheritance
 
 Each stack version must provide a metainfo.xml descriptor file which can declare whether the stack inherits from another stack version:
@@ -17,21 +16,21 @@
 
 The following files should not be redefined at the child stack version level:
 
-* properties/stack_features.json
-* properties/stack_tools.json
+- properties/stack_features.json
+- properties/stack_tools.json
 
 Note: These files should only exist at the base stack level.
 
 The following files if defined in the current stack version replace the definitions from the parent stack version:
 
-* kerberos.json
-* widgets.json
+- kerberos.json
+- widgets.json
 
 The following files if defined in the current stack version are merged with the parent stack version:
 
-* configuration/cluster-env.xml
+- configuration/cluster-env.xml
 
-* role_command_order.json
+- role_command_order.json
 
 Note: All the services' role command orders will be merge with the stack's role command order to provide a master list.
 
@@ -39,14 +38,14 @@
 
 The following directories if defined in the current stack version replace those from the parent stack version:
 
-* hooks
+- hooks
 
 This means the files included in those directories at the parent level will not be inherited. You will need to copy all the files you wish to keep from that directory structure.
 
 The following directories are not inherited:
 
-* repos
-* upgrades
+- repos
+- upgrades
 
 The repos/repoinfo.xml file should be defined in every stack version. The upgrades directory and its corresponding XML files should be defined in all stack versions that support upgrade.
 
@@ -59,10 +58,10 @@
       def __init__(self):
         super(HDP23StackAdvisor, self).__init__()
         Logger.initialize_logger()
- 
+
       def getComponentLayoutValidations(self, services, hosts):
         parentItems = super(HDP23StackAdvisor, self).getComponentLayoutValidations(services, hosts)
                  ...
 ```
 
-Services defined within the services folder follow the rules for [service inheritance](./custom-services.md#Service20%Inheritance). By default if a service does not declare an explicit inheritance (via the **extends** tag), the service will inherit from the service defined at the parent stack version.
+Services defined within the services folder follow the rules for [service inheritance](./custom-services.md#service-inheritance). By default if a service does not declare an explicit inheritance (via the **extends** tag), the service will inherit from the service defined at the parent stack version.
diff --git a/versioned_docs/version-2.7.5/ambari-design/views/index.md b/versioned_docs/version-2.7.5/ambari-design/views/index.md
index 66fc3f5..39a4ccc 100644
--- a/versioned_docs/version-2.7.5/ambari-design/views/index.md
+++ b/versioned_docs/version-2.7.5/ambari-design/views/index.md
@@ -1,39 +1,36 @@
 # Views
 
-:::info
-This capability is currently under development.
-:::info
+:::info This capability is currently under development. :::
 
 **Ambari Views** offer a systematic way to plug-in UI capabilities to surface custom visualization, management and monitoring features in Ambari Web. A " **view**" is a way of extending Ambari that allows 3rd parties to plug in new resource types along with the APIs, providers and UI to support them. In other words, a view is an application that is deployed into the Ambari container.
 
-
 ## Useful Resources
 
-Resource | Link
----------|-------
-Views Overview  | http://www.slideshare.net/hortonworks/ambari-views-overview
-Views Framework API Docs | https://github.com/apache/ambari/blob/trunk/ambari-views/docs/index.md
-Views Framework Examples | https://github.com/apache/ambari/tree/trunk/ambari-views/examples
+| Resource                 | Link                                                                   |
+| ------------------------ | ---------------------------------------------------------------------- |
+| Views Overview           | http://www.slideshare.net/hortonworks/ambari-views-overview            |
+| Views Framework API Docs | https://github.com/apache/ambari/blob/trunk/ambari-views/docs/index.md |
+| Views Framework Examples | https://github.com/apache/ambari/tree/trunk/ambari-views/examples      |
 
 ## Terminology
 
 The following section describes the basic terminology associated with views.
 
-Term | Description
----------|-------
-View Name     | The name of the view. The view name identifies the view to Ambari.
-View Version  | The version of the view. A unique view name can have multiple versions deployed in Ambari.
-View Package  | This is the JAR package that contains the **view definition** and all view resources (server-side resources and client-side assets) for a given view version. See [View Package](#View20%Package) for more information on the contents and structure of the package.
-View Definition | This defines the view name, version, resources and required/optional configuration parameters for a view. The view definition file is included in the view package. See View Definition for more information on the view definition file syntax and features.
-View Instance | An unique instance of a view, that is based on a view definition and specific version that is configured. See Versions and Instances for more information.
-View API  | The REST API for viewing the list of deployed views and creating view instances. See View API for more information.
-Framework Services | The server-side of the view framework exposes certain services for use with your views. This includes persistence of view instance data and view eventing. See Framework Services for more information.
+| Term | Description |
+| --- | --- |
+| View Name | The name of the view. The view name identifies the view to Ambari. |
+| View Version | The version of the view. A unique view name can have multiple versions deployed in Ambari. |
+| View Package | This is the JAR package that contains the **view definition** and all view resources (server-side resources and client-side assets) for a given view version. See [View Package](#view-package) for more information on the contents and structure of the package. |
+| View Definition | This defines the view name, version, resources and required/optional configuration parameters for a view. The view definition file is included in the view package. See View Definition for more information on the view definition file syntax and features. |
+| View Instance | An unique instance of a view, that is based on a view definition and specific version that is configured. See Versions and Instances for more information. |
+| View API | The REST API for viewing the list of deployed views and creating view instances. See View API for more information. |
+| Framework Services | The server-side of the view framework exposes certain services for use with your views. This includes persistence of view instance data and view eventing. See Framework Services for more information. |
 
 ## Components of a View
 
 A view can consist of **client-side assets** (i.e. the UI that is exposed in Ambari Web) and **server-side resources** (i.e. the classes that expose REST end points). When the view loads into Ambari Web, the view UI can use the view server-side resources as necessary to deliver the view functionality.
 
-![Apache Ambari > Views > view-components.jpg](./imgs/view-components.jpg "Apache Ambari > Views > view-components.jpg")
+![Apache Ambari > Views > view-components.jpg](./imgs/view-components.jpg 'Apache Ambari > Views > view-components.jpg')
 
 ### Client-side Assets
 
@@ -43,11 +40,9 @@
 
 A view can expose resources as REST end points to be used in conjunction with the client-side to deliver the functionality of your view application. Thees resources are written in Java and can be anything from a servlet to a regular REST service to an Ambari ResourceProvider (i.e. a special type of REST service that handles some REST capabilities such as partial response and pagination – if you adhere to the Ambari ResourceProvider interface). See [Framework Services](./framework-services.md) for more information on capabilities that the framework exposes on the server-side for views.
 
-:::info
-Checkout the **Weather View** as an example of a view that exposes servlet and REST endpoints.
+:::info Checkout the **Weather View** as an example of a view that exposes servlet and REST endpoints.
 
-[https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view](https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view)
-:::
+[https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view](https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view) :::
 
 ## View Package
 
@@ -72,7 +67,7 @@
 ## Versions and Instances
 
 Multiple versions of a given view can be deployed into Ambari and multiple instances of each view can be created for each version. For example, I can have a view named FILES and deploy versions 0.1.0 and 0.2.0. I can then create instances of each version `FILES_0.1.0` and `FILES_0.2.0` allowing some Ambari users to have an older version of FILES (0.1.0), and other users to have a newer version (0.2.0).
- 
+
 ### Instance Configuration Parameters
 
 As part of a view definition, the instance configuration parameters are specified (i.e. "these parameters are needed to configure an instance of this view"). When you create a view instance, you specify the configuration parameters specific to that instance. Since parameters are scoped to a particular view instance, you can have multiple instances of a view, each instance configured differently.
@@ -87,11 +82,10 @@
 
 ![Apache Ambari > Views > view-lifecycle.png](./imgs/view-lifecycle.png)
 
-```_ ... _```
+`_ ... _`
 
 ### Instance Configuration Parameters
 
 As part of a view definition, the instance configuration parameters are specified (i.e. "these parameters are needed to configure an instance of this view"). When you create a view instance, you specify the configuration parameters specific to that instance. Since parameters are scoped to a particular view instance, you can have multiple instances of a view, each instance configured differently.
 
-Using the example above, I can create two instances of the `FILES_0.2.0` version, one instance that is configured a certain way and the second that is configured differently. This allows some Ambari users to use `FILES` one way, and other users a different way.
-```_ ... _```
+Using the example above, I can create two instances of the `FILES_0.2.0` version, one instance that is configured a certain way and the second that is configured differently. This allows some Ambari users to use `FILES` one way, and other users a different way. `_ ... _`
diff --git a/versioned_docs/version-2.7.5/ambari-dev/index.md b/versioned_docs/version-2.7.5/ambari-dev/index.md
index 3dba30d..fda2389 100644
--- a/versioned_docs/version-2.7.5/ambari-dev/index.md
+++ b/versioned_docs/version-2.7.5/ambari-dev/index.md
@@ -12,11 +12,9 @@
 
 Alternatively, you can easily launch a VM that is preconfigured with all the tools that you need. See the **Pre-Configured Development Environment** section in the [Quick Start Guide](../quick-start/quick-start-guide.md).
 
-* xCode (if using Mac - free download from the apple store)
-* JDK 8 (Ambari 2.6 and below can be compiled with JDK 7, from Ambari 2.7, it can be compiled with at least JDK 8)
-* [Apache Maven](http://maven.apache.org/download.html) 3.3.9 or later
-Tip:In order to persist your changes to the JAVA_HOME environment variable and add Maven to your path, create the following files:
-File: ~/.profile
+- xCode (if using Mac - free download from the apple store)
+- JDK 8 (Ambari 2.6 and below can be compiled with JDK 7, from Ambari 2.7, it can be compiled with at least JDK 8)
+- [Apache Maven](http://maven.apache.org/download.html) 3.3.9 or later Tip: In order to persist your changes to the JAVA_HOME environment variable and add Maven to your path, create the following files: File: ~/.profile
 
 ```bash
 source ~/.bashrc
@@ -30,10 +28,8 @@
 export _JAVA_OPTIONS="-Xmx2048m -XX:MaxPermSize=512m -Djava.awt.headless=true"
 ```
 
-
-* Python 2.6 (Ambari 2.7 or later require Python 2.7 as minimum supported version)
-* Python setuptools:
-for Python 2.6: D [ownload](http://pypi.python.org/packages/2.6/s/setuptools/setuptools-0.6c11-py2.6.egg#md5=bfa92100bd772d5a213eedd356d64086) setuptools and run:
+- Python 2.6 (Ambari 2.7 or later require Python 2.7 as minimum supported version)
+- Python setuptools: for Python 2.6: [Download](http://pypi.python.org/packages/2.6/s/setuptools/setuptools-0.6c11-py2.6.egg#md5=bfa92100bd772d5a213eedd356d64086) setuptools and run:
 
 ```bash
 sh setuptools-0.6c11-py2.6.egg
@@ -45,61 +41,53 @@
 sh setuptools-0.6c11-py2.7.egg
 ```
 
-
-* rpmbuild (rpm-build package)
-* g++ (gcc-c++ package)
+- rpmbuild (rpm-build package)
+- g++ (gcc-c++ package)
 
 ## Running Unit Tests
 
-* `mvn clean test`
-* Run unit tests in a single module:
+- `mvn clean test`
+- Run unit tests in a single module:
 
 ```bash
 mvn -pl ambari-server test
 ```
 
-
-* Run only Java tests:
+- Run only Java tests:
 
 ```bash
 mvn -pl ambari-server -DskipPythonTests
 ```
 
-
-* Run only specific Java tests:
+- Run only specific Java tests:
 
 ```bash
 mvn -pl ambari-server -DskipPythonTests -Dtest=AgentHostInfoTest test
 ```
 
-
-* Run only Python tests:
+- Run only Python tests:
 
 ```bash
 mvn -pl ambari-server -DskipSurefireTests test
 ```
 
-
-* Run only specific Python tests:
+- Run only specific Python tests:
 
 ```bash
 mvn -pl ambari-server -DskipSurefireTests -Dpython.test.mask=TestUtils.py test
 ```
 
-
-* Run only Checkstyle and RAT checks:
+- Run only Checkstyle and RAT checks:
 
 ```bash
 mvn -pl ambari-server -DskipTests test
 ```
 
-
-
 NOTE: Please make sure you have npm in the path before running the unit tests.
 
 ## Generating Findbugs Report
 
-* mvn clean install
+- mvn clean install
 
 This will generate xml and html report unders target/findbugs. You can also add flags to skip unit tests to generate report faster.
 
@@ -109,7 +97,7 @@
 
 To build Ambari RPMs, run the following.
 
-Note: Replace ${AMBARI_VERSION} with a 4-digit version you want the artifacts to be (e.g., -DnewVersion=1.6.1.1)
+Note: Replace `${AMBARI_VERSION}` with a 4-digit version you want the artifacts to be (e.g., -DnewVersion=1.6.1.1)
 
 **Note**: If running into errors while compiling the ambari-metrics package due to missing the artifacts of jms, jmxri, jmxtools:
 
@@ -134,14 +122,14 @@
 
 ## Setting the Version Using Maven
 
-Ambari 2.8+ uses a newer method to update the version when building Ambari. 
+Ambari 2.8+ uses a newer method to update the version when building Ambari.
 
 **RHEL/CentOS 6**:
 
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
- 
+
 mvn -B clean install package rpm:rpm -DskipTests -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
@@ -150,7 +138,7 @@
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
-  
+
 mvn -B clean install package rpm:rpm -DskipTests -Psuse11 -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
@@ -159,36 +147,35 @@
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
-  
+
 mvn -B clean install package jdeb:jdeb -DskipTests -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
 Ambari Server will create following packages
 
-* RPM will be created under `AMBARI_DIR`/ambari-server/target/rpm/ambari-server/RPMS/noarch.
+- RPM will be created under `AMBARI_DIR`/ambari-server/target/rpm/ambari-server/RPMS/noarch.
 
-* DEB will be created under `AMBARI_DIR`/ambari-server/target/
+- DEB will be created under `AMBARI_DIR`/ambari-server/target/
 
 Ambari Agent will create following packages
 
-* RPM will be created under `AMBARI_DIR`/ambari-agent/target/rpm/ambari-agent/RPMS/x86_64.
+- RPM will be created under `AMBARI_DIR`/ambari-agent/target/rpm/ambari-agent/RPMS/x86_64.
 
-* DEB will be created under `AMBARI_DIR`/ambari-agent/target
+- DEB will be created under `AMBARI_DIR`/ambari-agent/target
 
 Optional parameters:
 
-* -X -e: add these options for more verbose output by Maven. Useful when debugging Maven issues.
+- -X -e: add these options for more verbose output by Maven. Useful when debugging Maven issues.
 
-* -DdefaultStackVersion=STACK-VERSION
-* Sets the default stack and version to be used for installation (e.g., -DdefaultStackVersion=HDP-1.3.0)
-* -DenableExperimental=true
-* Enables experimental features to be available via Ambari Web (default is false)
-* All views can be packaged in RPM by adding _-Dviews_ parameter
+- -DdefaultStackVersion=STACK-VERSION
+- Sets the default stack and version to be used for installation (e.g., -DdefaultStackVersion=HDP-1.3.0)
+- -DenableExperimental=true
+- Enables experimental features to be available via Ambari Web (default is false)
+- All views can be packaged in RPM by adding _-Dviews_ parameter
   - _mvn -B clean install package rpm:rpm -Dviews -DskipTests_
-* Specific views can be built by adding `--projects` parameter to the _-Dviews_
+- Specific views can be built by adding `--projects` parameter to the _-Dviews_
   - _mvn -B clean install package rpm:rpm --projects ambari-web,ambari-project,ambari-views,ambari-admin,contrib/views/files,contrib/views/pig,ambari-server,ambari-agent,ambari-client,ambari-shell -Dviews -DskipTests_
 
-
 _NOTE: Run everything as `root` below._
 
 ## Building Ambari Metrics
diff --git a/versioned_docs/version-2.7.5/ambari-dev/releasing-ambari.md b/versioned_docs/version-2.7.5/ambari-dev/releasing-ambari.md
index 4091c2c..ab7717d 100644
--- a/versioned_docs/version-2.7.5/ambari-dev/releasing-ambari.md
+++ b/versioned_docs/version-2.7.5/ambari-dev/releasing-ambari.md
@@ -4,13 +4,13 @@
 
 ### [Publishing Maven Artifacts](http://apache.org/dev/publishing-maven-artifacts.html)
 
-* Setting up release signing keys
-* Uploading artifacts to staging and release repositories
+- Setting up release signing keys
+- Uploading artifacts to staging and release repositories
 
 ### [Apache Release Guidelines](http://www.apache.org/legal/release-policy.html)
 
-* Release requirements
-* Process for staging
+- Release requirements
+- Process for staging
 
 ## Preparing for release
 
@@ -61,10 +61,10 @@
 Copy over {username}.asc to {username}@home.apache.org:public_html/~{username}.asc
 Verify URL http://home.apache.org/~{username}/{username}.asc
 Query PGP KeyServer http://pgp.mit.edu:11371/pks/lookup?search=0x{key}&op=vindex
-  
+
 Web of Trust:
 Request others to sign your PGP key.
- 
+
 Login at http://id.apache.org
 Add OpenPGP Fingerprint to your profile
 OpenPGP Public Key Primary Fingerprint: XXXX YYYY ZZZZ ....
@@ -94,7 +94,7 @@
 
 Create a branch for a release using branch-X.Y (ex: branch-2.1) as the name of the branch.
 
-Note: Going forward, we should be creating branch-{majorVersion}.{minorVersion}, so that the same branch can be used for maintenance releases.
+Note: Going forward, we should be creating branch-[majorVersion].[minorVersion], so that the same branch can be used for maintenance releases.
 
 **Checkout the release branch**
 
@@ -145,9 +145,8 @@
 # Review and commit the changes to branch-X.Y
 git commit
 ```
-:::danger
-Ambari 2.7 and Earlier Releases (Deprecated)
-:::
+
+:::danger Ambari 2.7 and Earlier Releases (Deprecated) :::
 
 Older Ambari branches still required that you update every `pom.xml` manually through the below process:
 
@@ -216,7 +215,7 @@
 
 **Setup Build**
 
-Setup Jenkins Job for the new branch on http://builds.apache.org 
+Setup Jenkins Job for the new branch on http://builds.apache.org
 
 ## Creating Release Candidate
 
@@ -294,9 +293,9 @@
 
 **Call for a vote on the dev@ambari.apache.org mailing list with something like this:**
 
-I have created an ambari-** release candidate.
+I have created an ambari-\*\* release candidate.
 
-GIT source tag (r***)
+GIT source tag (r\*\*\*)
 
 ```
 https://git-wip-us.apache.org/repos/asf/ambari/repo?p=ambari.git;a=log;h=refs/tags/release-x.y.z-rc0
@@ -316,8 +315,8 @@
 
 ## Publishing and Announcement
 
-* Login to [https://id.apache.org](https://id.apache.org) and verify the fingerprint of PGP key used to sign above is provided. (gpg --fingerprint)
-* Upload your PGP public key only to _/home/_
+- Login to [https://id.apache.org](https://id.apache.org) and verify the fingerprint of PGP key used to sign above is provided. (gpg --fingerprint)
+- Upload your PGP public key only to _/home/_
 
 Publish the release as below:
 
@@ -388,9 +387,9 @@
 
 - Login to https://reporter.apache.org/addrelease.html?ambari with apache credentials.
 - Fill out the fields:
-    - Committe: ambari
-    - Full version name: 2.2.0
-    - Date of release (YYYY-MM-DD):  2015-12-19
+  - Committee: ambari
+  - Full version name: 2.2.0
+  - Date of release (YYYY-MM-DD): 2015-12-19
 - Submit the data
 - Verify that the submitted data is reflected at https://reporter.apache.org/?ambari
 
@@ -398,4 +397,4 @@
 
 ## Publish Ambari artifacts to Maven central
 
-Please use the following [document](https://docs.google.com/document/d/1RjWQOaTUne6t8DPJorPhOMWAfOb6Xou6sAdHk96CHDw/edit) to publish Ambari artifacts to Maven central.  
+Please use the following [document](https://docs.google.com/document/d/1RjWQOaTUne6t8DPJorPhOMWAfOb6Xou6sAdHk96CHDw/edit) to publish Ambari artifacts to Maven central.
diff --git a/versioned_docs/version-2.7.5/ambari-plugin-contribution/scom/installation.md b/versioned_docs/version-2.7.5/ambari-plugin-contribution/scom/installation.md
index 6032c1d..25ca698 100644
--- a/versioned_docs/version-2.7.5/ambari-plugin-contribution/scom/installation.md
+++ b/versioned_docs/version-2.7.5/ambari-plugin-contribution/scom/installation.md
@@ -1,18 +1,18 @@
 # Installation
 
-## Prerequisite Software 
+## Prerequisite Software
 
 Setting up Ambari SCOM assumes the following prerequisite software:
 
-* Ambari SCOM 1.0
+- Ambari SCOM 1.0
   - Apache Hadoop 1.x cluster (HDFS and MapReduce) 1
-* Ambari SCOM 2.0
+- Ambari SCOM 2.0
   - Apache Hadoop 2.x cluster (HDFS and YARN/MapReduce) 2
-* JDK 1.7
-* Microsoft SQL Server 2012
-* Microsoft JDBC Driver 4.0 for SQL Server 3
-* Microsoft System Center Operations Manager (SCOM) 2012 SP1 or later
-* System Center Monitoring Agent installed on **Watcher Node** 4
+- JDK 1.7
+- Microsoft SQL Server 2012
+- Microsoft JDBC Driver 4.0 for SQL Server 3
+- Microsoft System Center Operations Manager (SCOM) 2012 SP1 or later
+- System Center Monitoring Agent installed on **Watcher Node** 4
 
 1 _Ambari SCOM_ 1.0 has been tested with a Hadoop cluster based on **Hortonworks Data Platform 1.3 for Windows** ("[HDP 1.3 for Windows](http://hortonworks.com/products/releases/hdp-1-3-for-windows/)")
 
@@ -22,7 +22,7 @@
 
 4 See Microsoft TechNet topic for [Managing Discovery and Agents](http://technet.microsoft.com/en-us/library/hh212772.aspx). Minimum Agent requirements _.NET 4_ and _PowerShell 2.0 + 3.0_
 
-## Package Contents 
+## Package Contents
 
 ```
 ├─ ambari-scom- _**version**_.zip
@@ -33,25 +33,23 @@
 └── ambari-scom.msi
 ```
 
-File | Name | Description
------|------|-------------
-server.zip | Server Package | Contains the required software for configuring the Ambari SCOM Server software. 
-metrics-sink.zip | Metrics Sink Package | Contains the required software for manually configuring SQL Server and the Hadoop Metrics Sink.
-ambari-scom.msi | MSI Installer | The Ambari SCOM MSI Installer for configuring the Ambari SCOM Server and Hadoop Metrics Sink
-mp.zip | Management Pack Package | Contains the Ambari SCOM Management Pack software.
+| File | Name | Description |
+| --- | --- | --- |
+| server.zip | Server Package | Contains the required software for configuring the Ambari SCOM Server software. |
+| metrics-sink.zip | Metrics Sink Package | Contains the required software for manually configuring SQL Server and the Hadoop Metrics Sink. |
+| ambari-scom.msi | MSI Installer | The Ambari SCOM MSI Installer for configuring the Ambari SCOM Server and Hadoop Metrics Sink |
+| mp.zip | Management Pack Package | Contains the Ambari SCOM Management Pack software. |
 
 ## Ambari SCOM Server Installation
 
-:::caution
-The **Ambari SCOM Management Pack** must connect to an Ambari SCOM Server to retrieve cluster metrics. Therefore, you need to have an Ambari SCOM Server running in your cluster. If you have already installed your Hadoop cluster (including the Ganglia Service) with Ambari (minimum **Ambari 1.5.1 for SCOM 2.0.0**) and have an Ambari Server already running + managing your Hadoop 1.x cluster, you can use that Ambari Server and point the **Management Pack** that host. You can proceed directly to [Installing Ambari SCOM Management Pack](#id-2installation-mgmtpack) and skip these steps to install an Ambari SCOM Server. If you do not have an Ambari Server running + managing your cluster, you **must** install an Ambari SCOM Server using one of the methods described below.
-:::
+:::caution The **Ambari SCOM Management Pack** must connect to an Ambari SCOM Server to retrieve cluster metrics. Therefore, you need to have an Ambari SCOM Server running in your cluster. If you have already installed your Hadoop cluster (including the Ganglia Service) with Ambari (minimum **Ambari 1.5.1 for SCOM 2.0.0**) and have an Ambari Server already running + managing your Hadoop 1.x cluster, you can use that Ambari Server and point the **Management Pack** to that host. You can proceed directly to [Installing Ambari SCOM Management Pack](#installing-ambari-scom-management-pack) and skip these steps to install an Ambari SCOM Server. If you do not have an Ambari Server running + managing your cluster, you **must** install an Ambari SCOM Server using one of the methods described below. :::
 
 The following methods are available for installing Ambari SCOM Server:
 
-* **Manual Installation** - This installation method requires you to configure the SQL Server database, setup the Ambari SCOM Server and configure the Hadoop Metrics Sink. This provides the most flexible install option based on your environment.
-* **MSI Installation** - This installation method installs the Ambari SCOM Server and configures the Hadoop Metrics Sink on all hosts in the cluster automatically using an MSI Installer. After launching the MSI, you provide information about your SQL Server database and the cluster for the installer to handle configuration. 
+- **Manual Installation** - This installation method requires you to configure the SQL Server database, setup the Ambari SCOM Server and configure the Hadoop Metrics Sink. This provides the most flexible install option based on your environment.
+- **MSI Installation** - This installation method installs the Ambari SCOM Server and configures the Hadoop Metrics Sink on all hosts in the cluster automatically using an MSI Installer. After launching the MSI, you provide information about your SQL Server database and the cluster for the installer to handle configuration.
 
-## Manual Installation 
+## Manual Installation
 
 ### Configuring SQL Server
 
@@ -63,11 +61,9 @@
 
 5. Create the Ambari SCOM database schema by running the `Hadoop-Metrics-SQLServer-CREATE.ddl` script.
 
-:::info
-The Hadoop Metrics DDL script will create a database called "HadoopMetrics".
-:::
+:::info The Hadoop Metrics DDL script will create a database called "HadoopMetrics". :::
 
-### Configuring Hadoop Metrics Sink 
+### Configuring Hadoop Metrics Sink
 
 #### Preparing the Metrics Sink
 
@@ -75,8 +71,7 @@
 
 2. Obtain the _Microsoft JDBC Driver 4.0 for SQL Server_ `sqljdbc4.jar` file.
 
-3. Copy `sqljdbc4.jar` and `metrics-sink-version.jar` to each host in the cluster. For example, copy to `C:\Ambari\metrics-sink-version.jar` and `C:\Ambari\sqljdbc4.jar`
-on each host.
+3. Copy `sqljdbc4.jar` and `metrics-sink-version.jar` to each host in the cluster. For example, copy to `C:\Ambari\metrics-sink-version.jar` and `C:\Ambari\sqljdbc4.jar` on each host.
 
 #### Setup Hadoop Metrics2 Interface
 
@@ -95,30 +90,27 @@
 reducetask.sink.sql.databaseUrl=jdbc:sqlserver://[server]:[port];databaseName=HadoopMetrics;user=[user];password=[password]
 ```
 
-:::info
-_Where:_
+:::info _Where:_
 
-* _server = the SQL Server hostname_
-* _port = the SQL Server port (for example, 1433)_
-* _user = the SQL Server user (for example, sa)_
-* _password = the SQL Server password (for example, BigData1)_
-:::
+- _server = the SQL Server hostname_
+- _port = the SQL Server port (for example, 1433)_
+- _user = the SQL Server user (for example, sa)_
+- _password = the SQL Server password (for example, BigData1)_ :::
 
 1. Update the Java classpath for each Hadoop service to include the `metrics-sink-<strong><em>version</em></strong>.jar` and `sqljdbc4.jar` files.
 
+   - Example: Updating the Java classpath for _HDP for Windows_ clusters
 
-    - Example: Updating the Java classpath for _HDP for Windows_ clusters
+     The `service.xml` files will be located in the `C:\hadoop\install\dir\bin` folder of each host in the cluster. The Java classpath is specified for each service in the `<arguments>` element of the `service.xml` file. For example, to update the Java classpath for the `NameNode` component, edit the `C:\hadoop\bin\namenode.xml` file.
 
-      The `service.xml` files will be located in the `C:\hadoop\install\dir\bin` folder of each host in the cluster. The Java classpath is specified for each service in the `<arguments>` element of the `service.xml` file. For example, to update the Java classpath for the `NameNode` component, edit the `C:\hadoop\bin\namenode.xml` file.
+     ```
+     ...
 
-        ```
-        ...
-        
-        ... -classpath ...;C:\Ambari\metrics-sink-1.5.1.2.0.0.0-673.jar;C:\Ambari\sqljdbc4.jar ...
-        
-        ...
-        
-        ```
+     ... -classpath ...;C:\Ambari\metrics-sink-1.5.1.2.0.0.0-673.jar;C:\Ambari\sqljdbc4.jar ...
+
+     ...
+
+     ```
 
 2. Restart Hadoop for these changes to take affect.
 
@@ -129,9 +121,8 @@
 ```sql
 select * from HadoopMetrics.dbo.MetricRecord
 ```
-:::info
-In the above SQL statement, `HadoopMetrics` is the database name.
-:::
+
+:::info In the above SQL statement, `HadoopMetrics` is the database name. :::
 
 ### Installing and Configuring Ambari SCOM Server
 
@@ -158,34 +149,30 @@
 scom.sink.db.url=jdbc:sqlserver://[server]:[port];databaseName=HadoopMetrics;user=[user];password=[password]
 ```
 
-:::info
-_Where:_
-  - _server = the SQL Server hostname_
-  - _port = the SQL Server port (for example, 1433)_
-  - _user = the SQL Server user (for example, sa)_
-  - _password = the SQL Server password (for example, BigData1)_
-:::
+:::info _Where:_
 
-6. Run the `org.apache.ambari.scom.AmbariServer` class from the Java command line to start the Ambari SCOM Server. 
+- _server = the SQL Server hostname_
+- _port = the SQL Server port (for example, 1433)_
+- _user = the SQL Server user (for example, sa)_
+- _password = the SQL Server password (for example, BigData1)_ :::
 
-:::info
-Be sure to include the following in the classpath:
-  - `ambari-scom-server-version.jar` file
-  - configuration folder containing the Ambari SCOM configuration files
-  - lib folder containing the Ambari SCOM dependencies
-  - folder containing the `clusterproperties.txt` file from the Hadoop install. For example, `c:\hadoop\install\dir`
-  - `sqljdbc4.jar` SQLServer JDBC Driver file
-::
-  
+6. Run the `org.apache.ambari.scom.AmbariServer` class from the Java command line to start the Ambari SCOM Server.
+
+:::info Be sure to include the following in the classpath:
+
+- `ambari-scom-server-version.jar` file
+- configuration folder containing the Ambari SCOM configuration files
+- lib folder containing the Ambari SCOM dependencies
+- folder containing the `clusterproperties.txt` file from the Hadoop install. For example, `c:\hadoop\install\dir`
+- `sqljdbc4.jar` SQLServer JDBC Driver file :::
+
 For example:
 
 ```bash
 java -server -XX:NewRatio=3 -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -XX:CMSInitiatingOccupancyFraction=60 -Xms512m -Xmx2048m -cp "c:\ambari-scom\server\conf;c:\ambari-scom\server\lib\*;c:\jdbc\sqljdbc4.jar;c:\hadoop\install\dir;c:\ambari-scom\server\ambari-scom-server-1.5.1.2.0.0.0-673.jar" org.apache.ambari.scom.AmbariServer
 ```
 
-:::info
-In the above command, be sure to replace the Ambari SCOM version in the `ambari-scom-server-version.jar` and replace `c:\hadoop\install\dir` with the folder containing the `clusterproperties.txt` file.
-:::
+:::info In the above command, be sure to replace the Ambari SCOM version in the `ambari-scom-server-version.jar` and replace `c:\hadoop\install\dir` with the folder containing the `clusterproperties.txt` file. :::
 
 #### Verify the Server API
 
@@ -194,6 +181,7 @@
 ```
 http://[ambari-scom-server]:8080/api/v1/clusters
 ```
+
 2. Verify that metrics are being reported.
 
 ```
@@ -217,33 +205,29 @@
 
 3. Run the `ambari-scom.msi` installer. The "Ambari SCOM Setup" dialog appears:
 
-     ![](./imgs/ambari-scom-msi2.png)
+   ![](./imgs/ambari-scom-msi2.png)
 
-4. Provide the following information: 
+4. Provide the following information:
 
-Field | Description
-------|------------
-Ambari SCOM package directory | The directory where the installer will place the Ambari SCOM Server packages. For example: C:\Ambari
-SQL Server hostname | The hostname of the SQL Server instance for Ambari SCOM Server to use to store Hadoop metrics.
-SQL Server port | The port of the SQL Server instance.
-SQL Server login | The login username.
-SQL Server password | The login password
-Path to SQL Server JDBC Driver (sqljdbc4.jar) | The path to the JDBC Driver JAR file.
-Path to the cluster layout file (clusterproperties.txt) | The path to the cluster layout properties file.
+| Field | Description |
+| --- | --- |
+| Ambari SCOM package directory | The directory where the installer will place the Ambari SCOM Server packages. For example: C:\Ambari |
+| SQL Server hostname | The hostname of the SQL Server instance for Ambari SCOM Server to use to store Hadoop metrics. |
+| SQL Server port | The port of the SQL Server instance. |
+| SQL Server login | The login username. |
+| SQL Server password | The login password |
+| Path to SQL Server JDBC Driver (sqljdbc4.jar) | The path to the JDBC Driver JAR file. |
+| Path to the cluster layout file (clusterproperties.txt) | The path to the cluster layout properties file. |
 
 5. You can optionally select to Start Services
 6. Click Install
 7. After completion, links are created on the desktop to "Start Ambari SCOM Server", "Browse Ambari API" and "Browse Ambari API Metrics". After starting the Ambari SCOM Server, browse the API and Metrics to confirm the server is working properly.
 
-:::info
-The MSI installer installation log can be found at `C:\AmbariInstallFiles\AmbariSetupTools\ambari.winpkg.install.log`
-:::
+:::info The MSI installer installation log can be found at `C:\AmbariInstallFiles\AmbariSetupTools\ambari.winpkg.install.log` :::
 
 ### Installing Ambari SCOM Management Pack
 
-:::info
-Before installing the Management pack, be sure to install the Ambari SCOM Server using the Ambari SCOM Server Installation instructions.
-:::
+:::info Before installing the Management pack, be sure to install the Ambari SCOM Server using the Ambari SCOM Server Installation instructions. :::
 
 #### Import the Management Pack
 
@@ -272,14 +256,13 @@
 Ambari.SCOM.Management.mpb
 Ambari.SCOM.Presentation.mpb
 ```
+
 10. Click "Open"
 11. Review the Import list and click "Install".
 
 12. The Ambari SCOM Management Pack installation will start.
 
-:::info
-The Ambari SCOM package also includes `AmbariSCOMManagementPack.msi` which is an alternative packaging of the `mp.zip`. This MSI is being made in **beta** form in this release.
-:::
+:::info The Ambari SCOM package also includes `AmbariSCOMManagementPack.msi` which is an alternative packaging of the `mp.zip`. This MSI is being made in **beta** form in this release. :::
 
 #### Create Run As Account
 
@@ -317,9 +300,7 @@
 http://[ambari-scom-server]:8080/api/
 ```
 
-:::info
-In the above Ambari URI, `ambari-scom-server` is the Ambari SCOM Server.
-:::
+:::info In the above Ambari URI, `ambari-scom-server` is the Ambari SCOM Server. :::
 
 6. Select the Run As Account that you created in Create Run As Account.
 
@@ -337,4 +318,4 @@
 
 ## Monitoring Scenarios
 
-[Monitoring Scenarios](https://cwiki.apache.org/confluence/display/AMBARI/3.+Monitoring+Scenarios)
\ No newline at end of file
+[Monitoring Scenarios](https://cwiki.apache.org/confluence/display/AMBARI/3.+Monitoring+Scenarios)
diff --git a/versioned_docs/version-2.7.6/ambari-design/kerberos/kerberos_descriptor.md b/versioned_docs/version-2.7.6/ambari-design/kerberos/kerberos_descriptor.md
index 2dd4798..871ca37 100644
--- a/versioned_docs/version-2.7.6/ambari-design/kerberos/kerberos_descriptor.md
+++ b/versioned_docs/version-2.7.6/ambari-design/kerberos/kerberos_descriptor.md
@@ -1,6 +1,7 @@
 ---
 title: The Kerberos Descriptor
 ---
+
 <!---
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements. See the NOTICE file distributed with
@@ -18,19 +19,18 @@
 limitations under the License.
 -->
 
-
 - [Introduction](index.md)
 - [The Kerberos Descriptor](#the-kerberos-descriptor)
   - [Components of a Kerberos Descriptor](#components-of-a-kerberos-descriptor)
     - [Stack-level Properties](#stack-level-properties)
     - [Stack-level Identities](#stack-level-identities)
     - [Stack-level Auth-to-local-properties](#stack-level-auth-to-local-properties)
-    - [Stack-level Configurations](#stack-level-configuratons)
+    - [Stack-level Configurations](#stack-level-configurations)
     - [Services](#services)
     - [Service-level Identities](#service-level-identities)
     - [Service-level Auth-to-local-properties](#service-level-auth-to-local-properties)
     - [Service-level Configurations](#service-level-configurations)
-    - [Components](#service-components)
+    - [Components](#components)
     - [Component-level Identities](#component-level-identities)
     - [Component-level Auth-to-local-properties](#component-level-auth-to-local-properties)
     - [Component-level Configurations](#component-level-configurations)
@@ -51,25 +51,15 @@
 
 ## The Kerberos Descriptor
 
-The Kerberos Descriptor is a JSON-formatted text file containing information needed by Ambari to enable
-or disable Kerberos for a stack and its services. This file must be named **_kerberos.json_** and should
-be in the root directory of the relevant stack or service definition. Kerberos Descriptors are meant to
-be hierarchical such that details in the stack-level descriptor can be overwritten (or updated) by details
-in the service-level descriptors.
+The Kerberos Descriptor is a JSON-formatted text file containing information needed by Ambari to enable or disable Kerberos for a stack and its services. This file must be named **_kerberos.json_** and should be in the root directory of the relevant stack or service definition. Kerberos Descriptors are meant to be hierarchical such that details in the stack-level descriptor can be overwritten (or updated) by details in the service-level descriptors.
 
-For the services in a stack to be Kerberized, there must be a stack-level Kerberos Descriptor. This
-ensures that even if a common service has a Kerberos Descriptor, it may not be Kerberized unless the
-relevant stack indicates that supports Kerberos by having a stack-level Kerberos Descriptor.
+For the services in a stack to be Kerberized, there must be a stack-level Kerberos Descriptor. This ensures that even if a common service has a Kerberos Descriptor, it may not be Kerberized unless the relevant stack indicates that it supports Kerberos by having a stack-level Kerberos Descriptor.
 
-For a component of a service to be Kerberized, there must be an entry for it in its containing service's
-service-level descriptor. This allows for some of a services' components to be managed and other
-components of that service to be ignored by the automated Kerberos facility.
+For a component of a service to be Kerberized, there must be an entry for it in its containing service's service-level descriptor. This allows for some of a service's components to be managed and other components of that service to be ignored by the automated Kerberos facility.
 
-Kerberos Descriptors are inherited from the base stack or service, but may be overridden as a full
-descriptor - partial descriptors are not allowed.
+Kerberos Descriptors are inherited from the base stack or service, but may be overridden as a full descriptor - partial descriptors are not allowed.
 
-A complete descriptor (which is built using the stack-level descriptor, the service-level descriptors,
-and any updates from user input) has the following structure:
+A complete descriptor (which is built using the stack-level descriptor, the service-level descriptors, and any updates from user input) has the following structure:
 
 - Stack-level Properties
 - Stack-level Identities
@@ -84,9 +74,7 @@
     - Component-level Auth-to-local-properties
     - Component-level Configurations
 
-Each level of the descriptor inherits the data from its parent. This data, however, may be overridden
-if necessary. For example, a component will inherit the configurations and identities of its container
-service; which in turn inherits the configurations and identities from the stack.
+Each level of the descriptor inherits the data from its parent. This data, however, may be overridden if necessary. For example, a component will inherit the configurations and identities of its container service; which in turn inherits the configurations and identities from the stack.
 
 <a name="components-of-a-kerberos-descriptor"></a>
 
@@ -96,13 +84,9 @@
 
 #### Stack-level Properties
 
-Stack-level properties is an optional set of name/value pairs that can be used in variable replacements.
-For example, if a property named "**_property1_**" exists with the value of "**_value1_**", then any instance of
-"**_${property1}_**" within a configuration property name or configuration property value will be replaced
-with "**_value1_**".
+Stack-level properties is an optional set of name/value pairs that can be used in variable replacements. For example, if a property named `**_property1_**` exists with the value of `**_value1_**`, then any instance of `**_${property1}_**` within a configuration property name or configuration property value will be replaced with `**_value1_**`.
 
-This property is only relevant in the stack-level Kerberos Descriptor and may not be overridden by
-lower-level descriptors.
+This property is only relevant in the stack-level Kerberos Descriptor and may not be overridden by lower-level descriptors.
 
 See [properties](#properties).
 
@@ -110,15 +94,7 @@
 
 #### Stack-level Identities
 
-Stack-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are common among all services in the stack. An example of such an identity is the
-Ambari smoke test user, which is used by all services to perform service check operations. Service-
-and component-level identities may reference (and specialize) stack-level identities using the
-identity’s name with a forward slash (/) preceding it. For example if there was a stack-level identity
-with the name "smokeuser", then a service or a component may create an identity block that references
-and specializes it by declaring a "**_reference_**" property and setting it to "/smokeuser".  Within
-this identity block details of the identity may be and overwritten as necessary. This does not alter
-the stack-level identity, it essentially creates a copy of it and updates the copy's properties.
+Stack-level identities is an optional identities block containing a list of zero or more identity descriptors that are common among all services in the stack. An example of such an identity is the Ambari smoke test user, which is used by all services to perform service check operations. Service- and component-level identities may reference (and specialize) stack-level identities using the identity’s name with a forward slash (/) preceding it. For example if there was a stack-level identity with the name "smokeuser", then a service or a component may create an identity block that references and specializes it by declaring a "**_reference_**" property and setting it to "/smokeuser". Within this identity block, details of the identity may be overwritten as necessary. This does not alter the stack-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 See [identities](#identities).
 
@@ -126,9 +102,7 @@
 
 #### Stack-level Auth-to-local-properties
 
-Stack-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Stack-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auth-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -136,11 +110,7 @@
 
 #### Stack-level Configurations
 
-Stack-level configurations is an optional configurations block containing a list of zero or more
-configuration descriptors that are common among all services in the stack. Configuration descriptors
-are overridable due to the structure of the data.  However, overriding configuration properties may
-create undesired behavior since it is not known until after the Kerberization process is complete
-what value a property will have.
+Stack-level configurations is an optional configurations block containing a list of zero or more configuration descriptors that are common among all services in the stack. Configuration descriptors are overridable due to the structure of the data. However, overriding configuration properties may create undesired behavior since it is not known until after the Kerberization process is complete what value a property will have.
 
 See [configurations](#configurations).
 
@@ -148,8 +118,7 @@
 
 #### Services
 
-Services is a list of zero or more service descriptors. A stack-level Kerberos Descriptor should not
-list any services; however a service-level Kerberos Descriptor should contain at least one.
+Services is a list of zero or more service descriptors. A stack-level Kerberos Descriptor should not list any services; however a service-level Kerberos Descriptor should contain at least one.
 
 See [services](#services).
 
@@ -157,16 +126,9 @@
 
 #### Service-level Identities
 
-Service-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are common among all components of the service. Component-level identities may
-reference (and specialize) service-level identities by specifying a relative or an absolute path
-to it.
+Service-level identities is an optional identities block containing a list of zero or more identity descriptors that are common among all components of the service. Component-level identities may reference (and specialize) service-level identities by specifying a relative or an absolute path to it.
 
-For example if there was a service-level identity with the name "service_identity", then a child
-component may create an identity block that references and specializes it by setting its "reference"
-attribute to "../service_identity" or "/service_name/service_identity" and overriding any values as
-necessary. This does not override the service-level identity, it essentially creates a copy of it and
-updates the copy's properties.
+For example if there was a service-level identity with the name "service_identity", then a child component may create an identity block that references and specializes it by setting its "reference" attribute to "../service_identity" or "/service_name/service_identity" and overriding any values as necessary. This does not override the service-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 ##### Examples
 
@@ -186,8 +148,7 @@
 }
 ```
 
-**Note**: By using the absolute path to an identity, any service-level identity may be referenced by
-any other service or component.
+**Note**: By using the absolute path to an identity, any service-level identity may be referenced by any other service or component.
 
 See [identities](#identities).
 
@@ -195,9 +156,7 @@
 
 #### Service-level Auth-to-local-properties
 
-Service-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Service-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auth-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -205,11 +164,7 @@
 
 #### Service-level Configurations
 
-Service-level configurations is an optional configurations block listing of zero or more configuration
-descriptors that are common among all components within a service. Configuration descriptors may be
-overridden due to the structure of the data. However, overriding configuration properties may create
-undesired behavior since it is not known until after the Kerberization process is complete what value
-a property will have.
+Service-level configurations is an optional configurations block listing zero or more configuration descriptors that are common among all components within a service. Configuration descriptors may be overridden due to the structure of the data. However, overriding configuration properties may create undesired behavior since it is not known until after the Kerberization process is complete what value a property will have.
 
 See [configurations](#configurations).
 
@@ -225,11 +180,7 @@
 
 #### Component-level Identities
 
-Component-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are specific to the component. A Component-level identity may be referenced
-(and specialized) by using the absolute path to it (`/service_name/component_name/identity_name`).
-This does not override the component-level identity, it essentially creates a copy of it and updates
-the copy's properties.
+Component-level identities is an optional identities block containing a list of zero or more identity descriptors that are specific to the component. A Component-level identity may be referenced (and specialized) by using the absolute path to it (`/service_name/component_name/identity_name`). This does not override the component-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 See [identities](#identities).
 
@@ -237,9 +188,7 @@
 
 #### Component-level Auth-to-local-properties
 
-Component-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Component-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auth-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -247,19 +196,17 @@
 
 #### Component-level Configurations
 
-Component-level configurations is an optional configurations block listing zero or more configuration
-descriptors that are specific to the component.
+Component-level configurations is an optional configurations block listing zero or more configuration descriptors that are specific to the component.
 
 See [configurations](#configurations).
 
-### Descriptor Specifications
+### Kerberos Descriptor Specifications
 
 <a name="properties"></a>
 
 #### properties
 
-The `properties` block is only valid in the service-level Kerberos Descriptor file. This block is
-a set of name/value pairs as follows:
+The `properties` block is only valid in the service-level Kerberos Descriptor file. This block is a set of name/value pairs as follows:
 
 ```
 "properties" : {
@@ -273,14 +220,9 @@
 
 #### auth-to-local-properties
 
-The `auth-to-local-properties` block is valid in the stack-, service-, and component-level
-descriptors. This block is a list of configuration specifications
-(`config-type/property_name[|concatenation_scheme]`) indicating which properties contain
-auth-to-local rules that should be dynamically updated based on the identities used within the
-Kerberized cluster.
+The `auth-to-local-properties` block is valid in the stack-, service-, and component-level descriptors. This block is a list of configuration specifications (`config-type/property_name[|concatenation_scheme]`) indicating which properties contain auth-to-local rules that should be dynamically updated based on the identities used within the Kerberized cluster.
 
-The specification optionally declares the concatenation scheme to use to append
-the rules into a rule set value. If specified one of the following schemes may be set:
+The specification optionally declares the concatenation scheme to use to append the rules into a rule set value. If specified, one of the following schemes may be set:
 
 - **`new_lines`** - rules in the rule set are separated by a new line (`\n`)
 - **`new_lines_escaped`** - rules in the rule set are separated by a `\` and a new line (`\n`)
@@ -300,15 +242,9 @@
 
 #### configurations
 
-A `configurations` block may exist in stack-, service-, and component-level descriptors.
-This block is a list of one or more configuration blocks containing a single structure named using
-the configuration type and containing values for each relevant property.
+A `configurations` block may exist in stack-, service-, and component-level descriptors. This block is a list of one or more configuration blocks containing a single structure named using the configuration type and containing values for each relevant property.
 
-Each property name and value may be a concrete value or contain variables to be replaced using values
-from the stack-level `properties` block or any available configuration. Properties from the `properties`
-block are referenced by name (`${property_name}`), configuration properties are reference by
-configuration specification (`${config-type/property_name}`) and kerberos principals are referenced by the principal path
-(`principals/SERVICE/COMPONENT/principal_name`).
+Each property name and value may be a concrete value or contain variables to be replaced using values from the stack-level `properties` block or any available configuration. Properties from the `properties` block are referenced by name (`${property_name}`), configuration properties are referenced by configuration specification (`${config-type/property_name}`) and Kerberos principals are referenced by the principal path (`principals/SERVICE/COMPONENT/principal_name`).
 
 ```
 "configurations" : [
@@ -329,8 +265,7 @@
 ]
 ```
 
-If `cluster-env/smokuser` was `"ambari-qa"` and realm was `"EXAMPLE.COM"`, the above block would
-effectively be translated to
+If `cluster-env/smokuser` was `"ambari-qa"` and realm was `"EXAMPLE.COM"`, the above block would effectively be translated to
 
 ```
 "configurations" : [
@@ -355,25 +290,11 @@
 
 #### identities
 
-An `identities` descriptor may exist in stack-, service-, and component-level descriptors. This block
-is a list of zero or more identity descriptors. Each identity descriptor is a block containing a `name`,
-an optional `reference` identifier, an optional `principal` descriptor, and an optional `keytab`
-descriptor.
+An `identities` descriptor may exist in stack-, service-, and component-level descriptors. This block is a list of zero or more identity descriptors. Each identity descriptor is a block containing a `name`, an optional `reference` identifier, an optional `principal` descriptor, and an optional `keytab` descriptor.
 
-The `name` property of an `identity` descriptor should be a concrete name that is unique with in its
-`local` scope (stack, service, or component). However, to maintain backwards-compatibility with
-previous versions of Ambari, it may be a reference identifier to some other identity in the
-Kerberos Descriptor. This feature is deprecated and may not be available in future versions of Ambari.
+The `name` property of an `identity` descriptor should be a concrete name that is unique within its `local` scope (stack, service, or component). However, to maintain backwards-compatibility with previous versions of Ambari, it may be a reference identifier to some other identity in the Kerberos Descriptor. This feature is deprecated and may not be available in future versions of Ambari.
 
-The `reference` property of an `identitiy` descriptor is optional. If it exists, it indicates that the
-properties from referenced identity is to be used as the base for the current identity and any properties
-specified in the local identity block overrides the base data. In this scenario, the base data is copied
-to the local identities and therefore changes are realized locally, not globally. Referenced identities
-may be hierarchical, so a referenced identity may reference another identity, and so on.  Because of
-this, care must be taken not to create cyclic references. Reference values must be in the form of a
-relative or absolute _path_ to the referenced identity descriptor. Relative _paths_ start with a `../`
-and may be specified in component-level identity descriptors to reference an identity descriptor
-in the parent service. Absolute _paths_ start with a `/` and may be specified at any level as follows:
+The `reference` property of an `identity` descriptor is optional. If it exists, it indicates that the properties from the referenced identity are to be used as the base for the current identity and any properties specified in the local identity block override the base data. In this scenario, the base data is copied to the local identities and therefore changes are realized locally, not globally. Referenced identities may be hierarchical, so a referenced identity may reference another identity, and so on. Because of this, care must be taken not to create cyclic references. Reference values must be in the form of a relative or absolute _path_ to the referenced identity descriptor. Relative _paths_ start with a `../` and may be specified in component-level identity descriptors to reference an identity descriptor in the parent service. Absolute _paths_ start with a `/` and may be specified at any level as follows:
 
 - **Stack-level** identity reference: `/identitiy_name`
 - **Service-level** identity reference: `/SERVICE_NAME/identitiy_name`
@@ -407,31 +328,15 @@
 
 #### principal
 
-The `principal` block is an optional block inside an `identity` descriptor block. It declares the
-details about the identity’s principal, including the principal’s `value`, the `type` (user or service),
-the relevant `configuration` property, and a local username mapping. All properties are optional; however
-if no base or default value is available (via the parent identity's `reference` value) for all properties,
-the principal may be ignored.
+The `principal` block is an optional block inside an `identity` descriptor block. It declares the details about the identity’s principal, including the principal’s `value`, the `type` (user or service), the relevant `configuration` property, and a local username mapping. All properties are optional; however if no base or default value is available (via the parent identity's `reference` value) for all properties, the principal may be ignored.
 
-The `value` property of the principal is expected to be the normalized principal name, including the
-principal’s components and realm. In most cases, the realm should be specified using the realm variable
-(`${realm}` or `${kerberos-env/realm}`). Also, in the case of a service principal, "`_HOST`" should be
-used to represent the relevant hostname.  This value is typically replaced on the agent side by either
-the agent-side scripts or the services themselves to be the hostname of the current host. However the
-built-in hostname variable (`${hostname}`) may be used if "`_HOST`" replacement on the agent-side is
-not available for the service. Examples: `smokeuser@${realm}`, `service/_HOST@${realm}`.
+The `value` property of the principal is expected to be the normalized principal name, including the principal’s components and realm. In most cases, the realm should be specified using the realm variable (`${realm}` or `${kerberos-env/realm}`). Also, in the case of a service principal, "`_HOST`" should be used to represent the relevant hostname. This value is typically replaced on the agent side by either the agent-side scripts or the services themselves to be the hostname of the current host. However the built-in hostname variable (`${hostname}`) may be used if "`_HOST`" replacement on the agent-side is not available for the service. Examples: `smokeuser@${realm}`, `service/_HOST@${realm}`.
 
-The `type` property of the principal may be either `user` or `service`. If not specified, the type is
-assumed to be `user`. This value dictates how the identity is to be created in the KDC or Active Directory.
-It is especially important in the Active Directory case due to how accounts are created. It also,
-indicates to Ambari how to handle the principal and relevant keytab file reguarding the user interface
-behavior and data caching.
+The `type` property of the principal may be either `user` or `service`. If not specified, the type is assumed to be `user`. This value dictates how the identity is to be created in the KDC or Active Directory. It is especially important in the Active Directory case due to how accounts are created. It also indicates to Ambari how to handle the principal and relevant keytab file regarding the user interface behavior and data caching.
 
-The `configuration` property is an optional configuration specification (`config-type/property_name`)
-that is to be set to this principal's `value` (after its variables have been replaced).
+The `configuration` property is an optional configuration specification (`config-type/property_name`) that is to be set to this principal's `value` (after its variables have been replaced).
 
-The `local_username` property, if supplied, indicates which local user account to use when generating
-auth-to-local rules for this identity. If not specified, no explicit auth-to-local rule will be generated.
+The `local_username` property, if supplied, indicates which local user account to use when generating auth-to-local rules for this identity. If not specified, no explicit auth-to-local rule will be generated.
 
 ```
 "principal" : {
@@ -454,24 +359,15 @@
 
 #### keytab
 
-The `keytab` block is an optional block inside an `identity` descriptor block. It describes how to
-create and store the relevant keytab file.  This block declares the keytab file's path in the local
-filesystem of the destination host, the permissions to assign to that file, and the relevant
-configuration property.
+The `keytab` block is an optional block inside an `identity` descriptor block. It describes how to create and store the relevant keytab file. This block declares the keytab file's path in the local filesystem of the destination host, the permissions to assign to that file, and the relevant configuration property.
 
-The `file` property declares an absolute path to use to store the keytab file when distributing to
-relevant hosts. If this is not supplied, the keytab file will not be created.
+The `file` property declares an absolute path to use to store the keytab file when distributing to relevant hosts. If this is not supplied, the keytab file will not be created.
 
-The `owner` property is an optional block indicating the local user account to assign as the owner of
-the file and what access  (`"rw"` - read/write; `"r"` - read-only) should
-be granted to that user. By default, the owner will be given read-only access.
+The `owner` property is an optional block indicating the local user account to assign as the owner of the file and what access (`"rw"` - read/write; `"r"` - read-only) should be granted to that user. By default, the owner will be given read-only access.
 
-The `group` property is an optional block indicating which local group to assigned as the group owner
-of the file and what access (`"rw"` - read/write; `"r"` - read-only; `“”` - no access) should be granted
-to local user accounts in that group. By default, the group will be given no access.
+The `group` property is an optional block indicating which local group to assign as the group owner of the file and what access (`"rw"` - read/write; `"r"` - read-only; `“”` - no access) should be granted to local user accounts in that group. By default, the group will be given no access.
 
-The `configuration` property is an optional configuration specification (`config-type/property_name`)
-that is to be set to the path of this keytabs file (after any variables have been replaced).
+The `configuration` property is an optional configuration specification (`config-type/property_name`) that is to be set to the path of this keytab file (after any variables have been replaced).
 
 ```
 "keytab" : {
@@ -492,11 +388,9 @@
 
 #### services
 
-A `services` block may exist in the stack-level and the service-level Kerberos Descriptor file.
-This block is a list of zero or more service descriptors to add to the Kerberos Descriptor.
+A `services` block may exist in the stack-level and the service-level Kerberos Descriptor file. This block is a list of zero or more service descriptors to add to the Kerberos Descriptor.
 
-Each service block contains a service `name`, and optionals `identities`,  `auth_to_local_properties`
-`configurations`, and `components` blocks.
+Each service block contains a service `name`, and optional `identities`, `auth_to_local_properties`, `configurations`, and `components` blocks.
 
 ```
 "services": [
@@ -538,10 +432,7 @@
 
 #### components
 
-A `components` block may exist within a `service` descriptor block. This block is a list of zero or
-more component descriptors belonging to the containing service descriptor. Each component descriptor
-is a block containing a component `name`, and optional `identities`, `auth_to_local_properties`,
-and `configurations` blocks.
+A `components` block may exist within a `service` descriptor block. This block is a list of zero or more component descriptors belonging to the containing service descriptor. Each component descriptor is a block containing a component `name`, and optional `identities`, `auth_to_local_properties`, and `configurations` blocks.
 
 ```
 "components": [
@@ -566,8 +457,8 @@
 ### Examples
 
 #### Example Stack-level Kerberos Descriptor
-The following example is annotated for descriptive purposes. The annotations are not valid in a real
-JSON-formatted file.
+
+The following example is annotated for descriptive purposes. The annotations are not valid in a real JSON-formatted file.
 
 ```
 {
@@ -661,8 +552,8 @@
 ```
 
 #### Example Service-level Kerberos Descriptor
-The following example is annotated for descriptive purposes. The annotations are not valid in a real
-JSON-formatted file.
+
+The following example is annotated for descriptive purposes. The annotations are not valid in a real JSON-formatted file.
 
 ```
 {
diff --git a/versioned_docs/version-2.7.6/ambari-design/stack-and-services/faq.md b/versioned_docs/version-2.7.6/ambari-design/stack-and-services/faq.md
index d19f838..06e68f2 100644
--- a/versioned_docs/version-2.7.6/ambari-design/stack-and-services/faq.md
+++ b/versioned_docs/version-2.7.6/ambari-design/stack-and-services/faq.md
@@ -6,18 +6,17 @@
 
 Ambari goes property by property and merge them from parent to child. So if you remove a category for example from the child it will be inherited from parent, that goes for pretty much all properties.
 
-So, the question is how do we tackle existence of a property in both parent and child. Here, most of the decision still follow same paradigm as take the child value instead of parent and every property in parent, not explicitly deleted from child using a marker like 
+So, the question is how do we tackle existence of a property in both parent and child. Here, most of the decision still follow same paradigm as take the child value instead of parent and every property in parent, not explicitly deleted from child using a marker like
 
+- For config-dependencies, we take all or nothing approach, if this property exists in child use it and all of its children else take it from parent.
 
-* For config-dependencies, we take all or nothing approach, if this property exists in child use it and all of its children else take it from parent.
+- The custom commands are merged based on names, such that the merged definition is a union of commands, with child commands with the same name overriding those from the parent.
 
-* The custom commands are merged based on names, such that merged definition is a union of commands with child commands with same name overriding those fro parent.
-
-* Cardinality is overwritten by a child or take from the parent if child has not provided one.
+- Cardinality is overwritten by a child or taken from the parent if the child has not provided one.
 
 You could look at this method for more details: `org.apache.ambari.server.api.util.StackExtensionHelper#mergeServices`
 
-For more information see the [Service Inheritance](./custom-services.md#Service20%Inheritance) wiki page.
+For more information see the [Service Inheritance](./custom-services.md#service-inheritance) wiki page.
 
 **If a component is missing in the new definition but is present in the parent, does it get inherited?**
 
@@ -26,4 +25,3 @@
 **Configuration dependencies for the service -- are they overwritten or merged?**
 
 Overwritten.
-
diff --git a/versioned_docs/version-2.7.6/ambari-design/stack-and-services/stack-inheritance.md b/versioned_docs/version-2.7.6/ambari-design/stack-and-services/stack-inheritance.md
index 8d5184d..5fd3ffd 100644
--- a/versioned_docs/version-2.7.6/ambari-design/stack-and-services/stack-inheritance.md
+++ b/versioned_docs/version-2.7.6/ambari-design/stack-and-services/stack-inheritance.md
@@ -1,4 +1,3 @@
-
 # Stack Inheritance
 
 Each stack version must provide a metainfo.xml descriptor file which can declare whether the stack inherits from another stack version:
@@ -17,21 +16,21 @@
 
 The following files should not be redefined at the child stack version level:
 
-* properties/stack_features.json
-* properties/stack_tools.json
+- properties/stack_features.json
+- properties/stack_tools.json
 
 Note: These files should only exist at the base stack level.
 
 The following files if defined in the current stack version replace the definitions from the parent stack version:
 
-* kerberos.json
-* widgets.json
+- kerberos.json
+- widgets.json
 
 The following files if defined in the current stack version are merged with the parent stack version:
 
-* configuration/cluster-env.xml
+- configuration/cluster-env.xml
 
-* role_command_order.json
+- role_command_order.json
 
 Note: All the services' role command orders will be merge with the stack's role command order to provide a master list.
 
@@ -39,14 +38,14 @@
 
 The following directories if defined in the current stack version replace those from the parent stack version:
 
-* hooks
+- hooks
 
 This means the files included in those directories at the parent level will not be inherited. You will need to copy all the files you wish to keep from that directory structure.
 
 The following directories are not inherited:
 
-* repos
-* upgrades
+- repos
+- upgrades
 
 The repos/repoinfo.xml file should be defined in every stack version. The upgrades directory and its corresponding XML files should be defined in all stack versions that support upgrade.
 
@@ -59,10 +58,10 @@
       def __init__(self):
         super(HDP23StackAdvisor, self).__init__()
         Logger.initialize_logger()
- 
+
       def getComponentLayoutValidations(self, services, hosts):
         parentItems = super(HDP23StackAdvisor, self).getComponentLayoutValidations(services, hosts)
                  ...
 ```
 
-Services defined within the services folder follow the rules for [service inheritance](./custom-services.md#Service20%Inheritance). By default if a service does not declare an explicit inheritance (via the **extends** tag), the service will inherit from the service defined at the parent stack version.
+Services defined within the services folder follow the rules for [service inheritance](./custom-services.md#service-inheritance). By default if a service does not declare an explicit inheritance (via the **extends** tag), the service will inherit from the service defined at the parent stack version.
diff --git a/versioned_docs/version-2.7.6/ambari-design/views/index.md b/versioned_docs/version-2.7.6/ambari-design/views/index.md
index 4f047e6..ddd0ce9 100644
--- a/versioned_docs/version-2.7.6/ambari-design/views/index.md
+++ b/versioned_docs/version-2.7.6/ambari-design/views/index.md
@@ -1,33 +1,30 @@
 # Views
 
-:::info
-This capability is currently under development.
-:::info
+:::info This capability is currently under development. :::
 
 **Ambari Views** offer a systematic way to plug-in UI capabilities to surface custom visualization, management and monitoring features in Ambari Web. A " **view**" is a way of extending Ambari that allows 3rd parties to plug in new resource types along with the APIs, providers and UI to support them. In other words, a view is an application that is deployed into the Ambari container.
 
-
 ## Useful Resources
 
-Resource | Link
----------|-------
-Views Overview  | http://www.slideshare.net/hortonworks/ambari-views-overview
-Views Framework API Docs | https://github.com/apache/ambari/blob/trunk/ambari-views/docs/index.md
-Views Framework Examples | https://github.com/apache/ambari/tree/trunk/ambari-views/examples
+| Resource                 | Link                                                                   |
+| ------------------------ | ---------------------------------------------------------------------- |
+| Views Overview           | http://www.slideshare.net/hortonworks/ambari-views-overview            |
+| Views Framework API Docs | https://github.com/apache/ambari/blob/trunk/ambari-views/docs/index.md |
+| Views Framework Examples | https://github.com/apache/ambari/tree/trunk/ambari-views/examples      |
 
 ## Terminology
 
 The following section describes the basic terminology associated with views.
 
-Term | Description
----------|-------
-View Name     | The name of the view. The view name identifies the view to Ambari.
-View Version  | The version of the view. A unique view name can have multiple versions deployed in Ambari.
-View Package  | This is the JAR package that contains the **view definition** and all view resources (server-side resources and client-side assets) for a given view version. See [View Package](#View20%Package) for more information on the contents and structure of the package.
-View Definition | This defines the view name, version, resources and required/optional configuration parameters for a view. The view definition file is included in the view package. See View Definition for more information on the view definition file syntax and features.
-View Instance | An unique instance of a view, that is based on a view definition and specific version that is configured. See Versions and Instances for more information.
-View API  | The REST API for viewing the list of deployed views and creating view instances. See View API for more information.
-Framework Services | The server-side of the view framework exposes certain services for use with your views. This includes persistence of view instance data and view eventing. See Framework Services for more information.
+| Term | Description |
+| --- | --- |
+| View Name | The name of the view. The view name identifies the view to Ambari. |
+| View Version | The version of the view. A unique view name can have multiple versions deployed in Ambari. |
+| View Package | This is the JAR package that contains the **view definition** and all view resources (server-side resources and client-side assets) for a given view version. See [View Package](#view-package) for more information on the contents and structure of the package. |
+| View Definition | This defines the view name, version, resources and required/optional configuration parameters for a view. The view definition file is included in the view package. See View Definition for more information on the view definition file syntax and features. |
+| View Instance | An unique instance of a view, that is based on a view definition and specific version that is configured. See Versions and Instances for more information. |
+| View API | The REST API for viewing the list of deployed views and creating view instances. See View API for more information. |
+| Framework Services | The server-side of the view framework exposes certain services for use with your views. This includes persistence of view instance data and view eventing. See Framework Services for more information. |
 
 ## Components of a View
 
@@ -43,11 +40,9 @@
 
 A view can expose resources as REST end points to be used in conjunction with the client-side to deliver the functionality of your view application. Thees resources are written in Java and can be anything from a servlet to a regular REST service to an Ambari ResourceProvider (i.e. a special type of REST service that handles some REST capabilities such as partial response and pagination – if you adhere to the Ambari ResourceProvider interface). See [Framework Services](./framework-services.md) for more information on capabilities that the framework exposes on the server-side for views.
 
-:::info
-Checkout the **Weather View** as an example of a view that exposes servlet and REST endpoints.
+:::info Checkout the **Weather View** as an example of a view that exposes servlet and REST endpoints.
 
-[https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view](https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view)
-:::
+[https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view](https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view) :::
 
 ## View Package
 
diff --git a/versioned_docs/version-2.7.6/ambari-dev/index.md b/versioned_docs/version-2.7.6/ambari-dev/index.md
index 3dba30d..fda2389 100644
--- a/versioned_docs/version-2.7.6/ambari-dev/index.md
+++ b/versioned_docs/version-2.7.6/ambari-dev/index.md
@@ -12,11 +12,9 @@
 
 Alternatively, you can easily launch a VM that is preconfigured with all the tools that you need. See the **Pre-Configured Development Environment** section in the [Quick Start Guide](../quick-start/quick-start-guide.md).
 
-* xCode (if using Mac - free download from the apple store)
-* JDK 8 (Ambari 2.6 and below can be compiled with JDK 7, from Ambari 2.7, it can be compiled with at least JDK 8)
-* [Apache Maven](http://maven.apache.org/download.html) 3.3.9 or later
-Tip:In order to persist your changes to the JAVA_HOME environment variable and add Maven to your path, create the following files:
-File: ~/.profile
+- xCode (if using Mac - free download from the apple store)
+- JDK 8 (Ambari 2.6 and below can be compiled with JDK 7, from Ambari 2.7, it can be compiled with at least JDK 8)
+- [Apache Maven](http://maven.apache.org/download.html) 3.3.9 or later Tip: In order to persist your changes to the JAVA_HOME environment variable and add Maven to your path, create the following files: File: ~/.profile
 
 ```bash
 source ~/.bashrc
@@ -30,10 +28,8 @@
 export _JAVA_OPTIONS="-Xmx2048m -XX:MaxPermSize=512m -Djava.awt.headless=true"
 ```
 
-
-* Python 2.6 (Ambari 2.7 or later require Python 2.7 as minimum supported version)
-* Python setuptools:
-for Python 2.6: D [ownload](http://pypi.python.org/packages/2.6/s/setuptools/setuptools-0.6c11-py2.6.egg#md5=bfa92100bd772d5a213eedd356d64086) setuptools and run:
+- Python 2.6 (Ambari 2.7 or later require Python 2.7 as minimum supported version)
+- Python setuptools: for Python 2.6: [Download](http://pypi.python.org/packages/2.6/s/setuptools/setuptools-0.6c11-py2.6.egg#md5=bfa92100bd772d5a213eedd356d64086) setuptools and run:
 
 ```bash
 sh setuptools-0.6c11-py2.6.egg
@@ -45,61 +41,53 @@
 sh setuptools-0.6c11-py2.7.egg
 ```
 
-
-* rpmbuild (rpm-build package)
-* g++ (gcc-c++ package)
+- rpmbuild (rpm-build package)
+- g++ (gcc-c++ package)
 
 ## Running Unit Tests
 
-* `mvn clean test`
-* Run unit tests in a single module:
+- `mvn clean test`
+- Run unit tests in a single module:
 
 ```bash
 mvn -pl ambari-server test
 ```
 
-
-* Run only Java tests:
+- Run only Java tests:
 
 ```bash
 mvn -pl ambari-server -DskipPythonTests
 ```
 
-
-* Run only specific Java tests:
+- Run only specific Java tests:
 
 ```bash
 mvn -pl ambari-server -DskipPythonTests -Dtest=AgentHostInfoTest test
 ```
 
-
-* Run only Python tests:
+- Run only Python tests:
 
 ```bash
 mvn -pl ambari-server -DskipSurefireTests test
 ```
 
-
-* Run only specific Python tests:
+- Run only specific Python tests:
 
 ```bash
 mvn -pl ambari-server -DskipSurefireTests -Dpython.test.mask=TestUtils.py test
 ```
 
-
-* Run only Checkstyle and RAT checks:
+- Run only Checkstyle and RAT checks:
 
 ```bash
 mvn -pl ambari-server -DskipTests test
 ```
 
-
-
 NOTE: Please make sure you have npm in the path before running the unit tests.
 
 ## Generating Findbugs Report
 
-* mvn clean install
+- mvn clean install
 
 This will generate xml and html report unders target/findbugs. You can also add flags to skip unit tests to generate report faster.
 
@@ -109,7 +97,7 @@
 
 To build Ambari RPMs, run the following.
 
-Note: Replace ${AMBARI_VERSION} with a 4-digit version you want the artifacts to be (e.g., -DnewVersion=1.6.1.1)
+Note: Replace `${AMBARI_VERSION}` with a 4-digit version you want the artifacts to be (e.g., -DnewVersion=1.6.1.1)
 
 **Note**: If running into errors while compiling the ambari-metrics package due to missing the artifacts of jms, jmxri, jmxtools:
 
@@ -134,14 +122,14 @@
 
 ## Setting the Version Using Maven
 
-Ambari 2.8+ uses a newer method to update the version when building Ambari. 
+Ambari 2.8+ uses a newer method to update the version when building Ambari.
 
 **RHEL/CentOS 6**:
 
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
- 
+
 mvn -B clean install package rpm:rpm -DskipTests -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
@@ -150,7 +138,7 @@
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
-  
+
 mvn -B clean install package rpm:rpm -DskipTests -Psuse11 -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
@@ -159,36 +147,35 @@
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
-  
+
 mvn -B clean install package jdeb:jdeb -DskipTests -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
 Ambari Server will create following packages
 
-* RPM will be created under `AMBARI_DIR`/ambari-server/target/rpm/ambari-server/RPMS/noarch.
+- RPM will be created under `AMBARI_DIR`/ambari-server/target/rpm/ambari-server/RPMS/noarch.
 
-* DEB will be created under `AMBARI_DIR`/ambari-server/target/
+- DEB will be created under `AMBARI_DIR`/ambari-server/target/
 
 Ambari Agent will create following packages
 
-* RPM will be created under `AMBARI_DIR`/ambari-agent/target/rpm/ambari-agent/RPMS/x86_64.
+- RPM will be created under `AMBARI_DIR`/ambari-agent/target/rpm/ambari-agent/RPMS/x86_64.
 
-* DEB will be created under `AMBARI_DIR`/ambari-agent/target
+- DEB will be created under `AMBARI_DIR`/ambari-agent/target
 
 Optional parameters:
 
-* -X -e: add these options for more verbose output by Maven. Useful when debugging Maven issues.
+- -X -e: add these options for more verbose output by Maven. Useful when debugging Maven issues.
 
-* -DdefaultStackVersion=STACK-VERSION
-* Sets the default stack and version to be used for installation (e.g., -DdefaultStackVersion=HDP-1.3.0)
-* -DenableExperimental=true
-* Enables experimental features to be available via Ambari Web (default is false)
-* All views can be packaged in RPM by adding _-Dviews_ parameter
+- -DdefaultStackVersion=STACK-VERSION
+- Sets the default stack and version to be used for installation (e.g., -DdefaultStackVersion=HDP-1.3.0)
+- -DenableExperimental=true
+- Enables experimental features to be available via Ambari Web (default is false)
+- All views can be packaged in RPM by adding _-Dviews_ parameter
   - _mvn -B clean install package rpm:rpm -Dviews -DskipTests_
-* Specific views can be built by adding `--projects` parameter to the _-Dviews_
+- Specific views can be built by adding `--projects` parameter to the _-Dviews_
   - _mvn -B clean install package rpm:rpm --projects ambari-web,ambari-project,ambari-views,ambari-admin,contrib/views/files,contrib/views/pig,ambari-server,ambari-agent,ambari-client,ambari-shell -Dviews -DskipTests_
 
-
 _NOTE: Run everything as `root` below._
 
 ## Building Ambari Metrics
diff --git a/versioned_docs/version-2.7.6/ambari-dev/releasing-ambari.md b/versioned_docs/version-2.7.6/ambari-dev/releasing-ambari.md
index 4091c2c..ab7717d 100644
--- a/versioned_docs/version-2.7.6/ambari-dev/releasing-ambari.md
+++ b/versioned_docs/version-2.7.6/ambari-dev/releasing-ambari.md
@@ -4,13 +4,13 @@
 
 ### [Publishing Maven Artifacts](http://apache.org/dev/publishing-maven-artifacts.html)
 
-* Setting up release signing keys
-* Uploading artifacts to staging and release repositories
+- Setting up release signing keys
+- Uploading artifacts to staging and release repositories
 
 ### [Apache Release Guidelines](http://www.apache.org/legal/release-policy.html)
 
-* Release requirements
-* Process for staging
+- Release requirements
+- Process for staging
 
 ## Preparing for release
 
@@ -61,10 +61,10 @@
 Copy over {username}.asc to {username}@home.apache.org:public_html/~{username}.asc
 Verify URL http://home.apache.org/~{username}/{username}.asc
 Query PGP KeyServer http://pgp.mit.edu:11371/pks/lookup?search=0x{key}&op=vindex
-  
+
 Web of Trust:
 Request others to sign your PGP key.
- 
+
 Login at http://id.apache.org
 Add OpenPGP Fingerprint to your profile
 OpenPGP Public Key Primary Fingerprint: XXXX YYYY ZZZZ ....
@@ -94,7 +94,7 @@
 
 Create a branch for a release using branch-X.Y (ex: branch-2.1) as the name of the branch.
 
-Note: Going forward, we should be creating branch-{majorVersion}.{minorVersion}, so that the same branch can be used for maintenance releases.
+Note: Going forward, we should be creating branch-[majorVersion].[minorVersion], so that the same branch can be used for maintenance releases.
 
 **Checkout the release branch**
 
@@ -145,9 +145,8 @@
 # Review and commit the changes to branch-X.Y
 git commit
 ```
-:::danger
-Ambari 2.7 and Earlier Releases (Deprecated)
-:::
+
+:::danger Ambari 2.7 and Earlier Releases (Deprecated) :::
 
 Older Ambari branches still required that you update every `pom.xml` manually through the below process:
 
@@ -216,7 +215,7 @@
 
 **Setup Build**
 
-Setup Jenkins Job for the new branch on http://builds.apache.org 
+Setup Jenkins Job for the new branch on http://builds.apache.org
 
 ## Creating Release Candidate
 
@@ -294,9 +293,9 @@
 
 **Call for a vote on the dev@ambari.apache.org mailing list with something like this:**
 
-I have created an ambari-** release candidate.
+I have created an ambari-\*\* release candidate.
 
-GIT source tag (r***)
+GIT source tag (r\*\*\*)
 
 ```
 https://git-wip-us.apache.org/repos/asf/ambari/repo?p=ambari.git;a=log;h=refs/tags/release-x.y.z-rc0
@@ -316,8 +315,8 @@
 
 ## Publishing and Announcement
 
-* Login to [https://id.apache.org](https://id.apache.org) and verify the fingerprint of PGP key used to sign above is provided. (gpg --fingerprint)
-* Upload your PGP public key only to _/home/_
+- Login to [https://id.apache.org](https://id.apache.org) and verify the fingerprint of PGP key used to sign above is provided. (gpg --fingerprint)
+- Upload your PGP public key only to _/home/_
 
 Publish the release as below:
 
@@ -388,9 +387,9 @@
 
 - Login to https://reporter.apache.org/addrelease.html?ambari with apache credentials.
 - Fill out the fields:
-    - Committe: ambari
-    - Full version name: 2.2.0
-    - Date of release (YYYY-MM-DD):  2015-12-19
+  - Committee: ambari
+  - Full version name: 2.2.0
+  - Date of release (YYYY-MM-DD): 2015-12-19
 - Submit the data
 - Verify that the submitted data is reflected at https://reporter.apache.org/?ambari
 
@@ -398,4 +397,4 @@
 
 ## Publish Ambari artifacts to Maven central
 
-Please use the following [document](https://docs.google.com/document/d/1RjWQOaTUne6t8DPJorPhOMWAfOb6Xou6sAdHk96CHDw/edit) to publish Ambari artifacts to Maven central.  
+Please use the following [document](https://docs.google.com/document/d/1RjWQOaTUne6t8DPJorPhOMWAfOb6Xou6sAdHk96CHDw/edit) to publish Ambari artifacts to Maven central.
diff --git a/versioned_docs/version-2.7.6/ambari-plugin-contribution/scom/installation.md b/versioned_docs/version-2.7.6/ambari-plugin-contribution/scom/installation.md
index 6032c1d..25ca698 100644
--- a/versioned_docs/version-2.7.6/ambari-plugin-contribution/scom/installation.md
+++ b/versioned_docs/version-2.7.6/ambari-plugin-contribution/scom/installation.md
@@ -1,18 +1,18 @@
 # Installation
 
-## Prerequisite Software 
+## Prerequisite Software
 
 Setting up Ambari SCOM assumes the following prerequisite software:
 
-* Ambari SCOM 1.0
+- Ambari SCOM 1.0
   - Apache Hadoop 1.x cluster (HDFS and MapReduce) 1
-* Ambari SCOM 2.0
+- Ambari SCOM 2.0
   - Apache Hadoop 2.x cluster (HDFS and YARN/MapReduce) 2
-* JDK 1.7
-* Microsoft SQL Server 2012
-* Microsoft JDBC Driver 4.0 for SQL Server 3
-* Microsoft System Center Operations Manager (SCOM) 2012 SP1 or later
-* System Center Monitoring Agent installed on **Watcher Node** 4
+- JDK 1.7
+- Microsoft SQL Server 2012
+- Microsoft JDBC Driver 4.0 for SQL Server 3
+- Microsoft System Center Operations Manager (SCOM) 2012 SP1 or later
+- System Center Monitoring Agent installed on **Watcher Node** 4
 
 1 _Ambari SCOM_ 1.0 has been tested with a Hadoop cluster based on **Hortonworks Data Platform 1.3 for Windows** ("[HDP 1.3 for Windows](http://hortonworks.com/products/releases/hdp-1-3-for-windows/)")
 
@@ -22,7 +22,7 @@
 
 4 See Microsoft TechNet topic for [Managing Discovery and Agents](http://technet.microsoft.com/en-us/library/hh212772.aspx). Minimum Agent requirements _.NET 4_ and _PowerShell 2.0 + 3.0_
 
-## Package Contents 
+## Package Contents
 
 ```
 ├─ ambari-scom- _**version**_.zip
@@ -33,25 +33,23 @@
 └── ambari-scom.msi
 ```
 
-File | Name | Description
------|------|-------------
-server.zip | Server Package | Contains the required software for configuring the Ambari SCOM Server software. 
-metrics-sink.zip | Metrics Sink Package | Contains the required software for manually configuring SQL Server and the Hadoop Metrics Sink.
-ambari-scom.msi | MSI Installer | The Ambari SCOM MSI Installer for configuring the Ambari SCOM Server and Hadoop Metrics Sink
-mp.zip | Management Pack Package | Contains the Ambari SCOM Management Pack software.
+| File | Name | Description |
+| --- | --- | --- |
+| server.zip | Server Package | Contains the required software for configuring the Ambari SCOM Server software. |
+| metrics-sink.zip | Metrics Sink Package | Contains the required software for manually configuring SQL Server and the Hadoop Metrics Sink. |
+| ambari-scom.msi | MSI Installer | The Ambari SCOM MSI Installer for configuring the Ambari SCOM Server and Hadoop Metrics Sink |
+| mp.zip | Management Pack Package | Contains the Ambari SCOM Management Pack software. |
 
 ## Ambari SCOM Server Installation
 
-:::caution
-The **Ambari SCOM Management Pack** must connect to an Ambari SCOM Server to retrieve cluster metrics. Therefore, you need to have an Ambari SCOM Server running in your cluster. If you have already installed your Hadoop cluster (including the Ganglia Service) with Ambari (minimum **Ambari 1.5.1 for SCOM 2.0.0**) and have an Ambari Server already running + managing your Hadoop 1.x cluster, you can use that Ambari Server and point the **Management Pack** that host. You can proceed directly to [Installing Ambari SCOM Management Pack](#id-2installation-mgmtpack) and skip these steps to install an Ambari SCOM Server. If you do not have an Ambari Server running + managing your cluster, you **must** install an Ambari SCOM Server using one of the methods described below.
-:::
+:::caution The **Ambari SCOM Management Pack** must connect to an Ambari SCOM Server to retrieve cluster metrics. Therefore, you need to have an Ambari SCOM Server running in your cluster. If you have already installed your Hadoop cluster (including the Ganglia Service) with Ambari (minimum **Ambari 1.5.1 for SCOM 2.0.0**) and have an Ambari Server already running + managing your Hadoop 1.x cluster, you can use that Ambari Server and point the **Management Pack** that host. You can proceed directly to [Installing Ambari SCOM Management Pack](#installing-ambari-scom-management-pack) and skip these steps to install an Ambari SCOM Server. If you do not have an Ambari Server running + managing your cluster, you **must** install an Ambari SCOM Server using one of the methods described below. :::
 
 The following methods are available for installing Ambari SCOM Server:
 
-* **Manual Installation** - This installation method requires you to configure the SQL Server database, setup the Ambari SCOM Server and configure the Hadoop Metrics Sink. This provides the most flexible install option based on your environment.
-* **MSI Installation** - This installation method installs the Ambari SCOM Server and configures the Hadoop Metrics Sink on all hosts in the cluster automatically using an MSI Installer. After launching the MSI, you provide information about your SQL Server database and the cluster for the installer to handle configuration. 
+- **Manual Installation** - This installation method requires you to configure the SQL Server database, setup the Ambari SCOM Server and configure the Hadoop Metrics Sink. This provides the most flexible install option based on your environment.
+- **MSI Installation** - This installation method installs the Ambari SCOM Server and configures the Hadoop Metrics Sink on all hosts in the cluster automatically using an MSI Installer. After launching the MSI, you provide information about your SQL Server database and the cluster for the installer to handle configuration.
 
-## Manual Installation 
+## Manual Installation
 
 ### Configuring SQL Server
 
@@ -63,11 +61,9 @@
 
 5. Create the Ambari SCOM database schema by running the `Hadoop-Metrics-SQLServer-CREATE.ddl` script.
 
-:::info
-The Hadoop Metrics DDL script will create a database called "HadoopMetrics".
-:::
+:::info The Hadoop Metrics DDL script will create a database called "HadoopMetrics". :::
 
-### Configuring Hadoop Metrics Sink 
+### Configuring Hadoop Metrics Sink
 
 #### Preparing the Metrics Sink
 
@@ -75,8 +71,7 @@
 
 2. Obtain the _Microsoft JDBC Driver 4.0 for SQL Server_ `sqljdbc4.jar` file.
 
-3. Copy `sqljdbc4.jar` and `metrics-sink-version.jar` to each host in the cluster. For example, copy to `C:\Ambari\metrics-sink-version.jar` and `C:\Ambari\sqljdbc4.jar`
-on each host.
+3. Copy `sqljdbc4.jar` and `metrics-sink-version.jar` to each host in the cluster. For example, copy to `C:\Ambari\metrics-sink-version.jar` and `C:\Ambari\sqljdbc4.jar` on each host.
 
 #### Setup Hadoop Metrics2 Interface
 
@@ -95,30 +90,27 @@
 reducetask.sink.sql.databaseUrl=jdbc:sqlserver://[server]:[port];databaseName=HadoopMetrics;user=[user];password=[password]
 ```
 
-:::info
-_Where:_
+:::info _Where:_
 
-* _server = the SQL Server hostname_
-* _port = the SQL Server port (for example, 1433)_
-* _user = the SQL Server user (for example, sa)_
-* _password = the SQL Server password (for example, BigData1)_
-:::
+- _server = the SQL Server hostname_
+- _port = the SQL Server port (for example, 1433)_
+- _user = the SQL Server user (for example, sa)_
+- _password = the SQL Server password (for example, BigData1)_ :::
 
 1. Update the Java classpath for each Hadoop service to include the `metrics-sink-<strong><em>version</em></strong>.jar` and `sqljdbc4.jar` files.
 
+   - Example: Updating the Java classpath for _HDP for Windows_ clusters
 
-    - Example: Updating the Java classpath for _HDP for Windows_ clusters
+     The `service.xml` files will be located in the `C:\hadoop\install\dir\bin` folder of each host in the cluster. The Java classpath is specified for each service in the `<arguments>` element of the `service.xml` file. For example, to update the Java classpath for the `NameNode` component, edit the `C:\hadoop\bin\namenode.xml` file.
 
-      The `service.xml` files will be located in the `C:\hadoop\install\dir\bin` folder of each host in the cluster. The Java classpath is specified for each service in the `<arguments>` element of the `service.xml` file. For example, to update the Java classpath for the `NameNode` component, edit the `C:\hadoop\bin\namenode.xml` file.
+     ```
+     ...
 
-        ```
-        ...
-        
-        ... -classpath ...;C:\Ambari\metrics-sink-1.5.1.2.0.0.0-673.jar;C:\Ambari\sqljdbc4.jar ...
-        
-        ...
-        
-        ```
+     ... -classpath ...;C:\Ambari\metrics-sink-1.5.1.2.0.0.0-673.jar;C:\Ambari\sqljdbc4.jar ...
+
+     ...
+
+     ```
 
 2. Restart Hadoop for these changes to take affect.
 
@@ -129,9 +121,8 @@
 ```sql
 select * from HadoopMetrics.dbo.MetricRecord
 ```
-:::info
-In the above SQL statement, `HadoopMetrics` is the database name.
-:::
+
+:::info In the above SQL statement, `HadoopMetrics` is the database name. :::
 
 ### Installing and Configuring Ambari SCOM Server
 
@@ -158,34 +149,30 @@
 scom.sink.db.url=jdbc:sqlserver://[server]:[port];databaseName=HadoopMetrics;user=[user];password=[password]
 ```
 
-:::info
-_Where:_
-  - _server = the SQL Server hostname_
-  - _port = the SQL Server port (for example, 1433)_
-  - _user = the SQL Server user (for example, sa)_
-  - _password = the SQL Server password (for example, BigData1)_
-:::
+:::info _Where:_
 
-6. Run the `org.apache.ambari.scom.AmbariServer` class from the Java command line to start the Ambari SCOM Server. 
+- _server = the SQL Server hostname_
+- _port = the SQL Server port (for example, 1433)_
+- _user = the SQL Server user (for example, sa)_
+- _password = the SQL Server password (for example, BigData1)_ :::
 
-:::info
-Be sure to include the following in the classpath:
-  - `ambari-scom-server-version.jar` file
-  - configuration folder containing the Ambari SCOM configuration files
-  - lib folder containing the Ambari SCOM dependencies
-  - folder containing the `clusterproperties.txt` file from the Hadoop install. For example, `c:\hadoop\install\dir`
-  - `sqljdbc4.jar` SQLServer JDBC Driver file
-::
-  
+6. Run the `org.apache.ambari.scom.AmbariServer` class from the Java command line to start the Ambari SCOM Server.
+
+:::info Be sure to include the following in the classpath:
+
+- `ambari-scom-server-version.jar` file
+- configuration folder containing the Ambari SCOM configuration files
+- lib folder containing the Ambari SCOM dependencies
+- folder containing the `clusterproperties.txt` file from the Hadoop install. For example, `c:\hadoop\install\dir`
+- `sqljdbc4.jar` SQLServer JDBC Driver file :::
+
 For example:
 
 ```bash
 java -server -XX:NewRatio=3 -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -XX:CMSInitiatingOccupancyFraction=60 -Xms512m -Xmx2048m -cp "c:\ambari-scom\server\conf;c:\ambari-scom\server\lib\*;c:\jdbc\sqljdbc4.jar;c:\hadoop\install\dir;c:\ambari-scom\server\ambari-scom-server-1.5.1.2.0.0.0-673.jar" org.apache.ambari.scom.AmbariServer
 ```
 
-:::info
-In the above command, be sure to replace the Ambari SCOM version in the `ambari-scom-server-version.jar` and replace `c:\hadoop\install\dir` with the folder containing the `clusterproperties.txt` file.
-:::
+:::info In the above command, be sure to replace the Ambari SCOM version in the `ambari-scom-server-version.jar` and replace `c:\hadoop\install\dir` with the folder containing the `clusterproperties.txt` file. :::
 
 #### Verify the Server API
 
@@ -194,6 +181,7 @@
 ```
 http://[ambari-scom-server]:8080/api/v1/clusters
 ```
+
 2. Verify that metrics are being reported.
 
 ```
@@ -217,33 +205,29 @@
 
 3. Run the `ambari-scom.msi` installer. The "Ambari SCOM Setup" dialog appears:
 
-     ![](./imgs/ambari-scom-msi2.png)
+   ![](./imgs/ambari-scom-msi2.png)
 
-4. Provide the following information: 
+4. Provide the following information:
 
-Field | Description
-------|------------
-Ambari SCOM package directory | The directory where the installer will place the Ambari SCOM Server packages. For example: C:\Ambari
-SQL Server hostname | The hostname of the SQL Server instance for Ambari SCOM Server to use to store Hadoop metrics.
-SQL Server port | The port of the SQL Server instance.
-SQL Server login | The login username.
-SQL Server password | The login password
-Path to SQL Server JDBC Driver (sqljdbc4.jar) | The path to the JDBC Driver JAR file.
-Path to the cluster layout file (clusterproperties.txt) | The path to the cluster layout properties file.
+| Field | Description |
+| --- | --- |
+| Ambari SCOM package directory | The directory where the installer will place the Ambari SCOM Server packages. For example: C:\Ambari |
+| SQL Server hostname | The hostname of the SQL Server instance for Ambari SCOM Server to use to store Hadoop metrics. |
+| SQL Server port | The port of the SQL Server instance. |
+| SQL Server login | The login username. |
+| SQL Server password | The login password |
+| Path to SQL Server JDBC Driver (sqljdbc4.jar) | The path to the JDBC Driver JAR file. |
+| Path to the cluster layout file (clusterproperties.txt) | The path to the cluster layout properties file. |
 
 5. You can optionally select to Start Services
 6. Click Install
 7. After completion, links are created on the desktop to "Start Ambari SCOM Server", "Browse Ambari API" and "Browse Ambari API Metrics". After starting the Ambari SCOM Server, browse the API and Metrics to confirm the server is working properly.
 
-:::info
-The MSI installer installation log can be found at `C:\AmbariInstallFiles\AmbariSetupTools\ambari.winpkg.install.log`
-:::
+:::info The MSI installer installation log can be found at `C:\AmbariInstallFiles\AmbariSetupTools\ambari.winpkg.install.log` :::
 
 ### Installing Ambari SCOM Management Pack
 
-:::info
-Before installing the Management pack, be sure to install the Ambari SCOM Server using the Ambari SCOM Server Installation instructions.
-:::
+:::info Before installing the Management pack, be sure to install the Ambari SCOM Server using the Ambari SCOM Server Installation instructions. :::
 
 #### Import the Management Pack
 
@@ -272,14 +256,13 @@
 Ambari.SCOM.Management.mpb
 Ambari.SCOM.Presentation.mpb
 ```
+
 10. Click "Open"
 11. Review the Import list and click "Install".
 
 12. The Ambari SCOM Management Pack installation will start.
 
-:::info
-The Ambari SCOM package also includes `AmbariSCOMManagementPack.msi` which is an alternative packaging of the `mp.zip`. This MSI is being made in **beta** form in this release.
-:::
+:::info The Ambari SCOM package also includes `AmbariSCOMManagementPack.msi` which is an alternative packaging of the `mp.zip`. This MSI is being made in **beta** form in this release. :::
 
 #### Create Run As Account
 
@@ -317,9 +300,7 @@
 http://[ambari-scom-server]:8080/api/
 ```
 
-:::info
-In the above Ambari URI, `ambari-scom-server` is the Ambari SCOM Server.
-:::
+:::info In the above Ambari URI, `ambari-scom-server` is the Ambari SCOM Server. :::
 
 6. Select the Run As Account that you created in Create Run As Account.
 
@@ -337,4 +318,4 @@
 
 ## Monitoring Scenarios
 
-[Monitoring Scenarios](https://cwiki.apache.org/confluence/display/AMBARI/3.+Monitoring+Scenarios)
\ No newline at end of file
+[Monitoring Scenarios](https://cwiki.apache.org/confluence/display/AMBARI/3.+Monitoring+Scenarios)
diff --git a/versioned_docs/version-3.0.0/ambari-design/kerberos/kerberos_descriptor.md b/versioned_docs/version-3.0.0/ambari-design/kerberos/kerberos_descriptor.md
index 2dd4798..871ca37 100644
--- a/versioned_docs/version-3.0.0/ambari-design/kerberos/kerberos_descriptor.md
+++ b/versioned_docs/version-3.0.0/ambari-design/kerberos/kerberos_descriptor.md
@@ -1,6 +1,7 @@
 ---
 title: The Kerberos Descriptor
 ---
+
 <!---
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements. See the NOTICE file distributed with
@@ -18,19 +19,18 @@
 limitations under the License.
 -->
 
-
 - [Introduction](index.md)
 - [The Kerberos Descriptor](#the-kerberos-descriptor)
   - [Components of a Kerberos Descriptor](#components-of-a-kerberos-descriptor)
     - [Stack-level Properties](#stack-level-properties)
     - [Stack-level Identities](#stack-level-identities)
     - [Stack-level Auth-to-local-properties](#stack-level-auth-to-local-properties)
-    - [Stack-level Configurations](#stack-level-configuratons)
+    - [Stack-level Configurations](#stack-level-configurations)
     - [Services](#services)
     - [Service-level Identities](#service-level-identities)
     - [Service-level Auth-to-local-properties](#service-level-auth-to-local-properties)
     - [Service-level Configurations](#service-level-configurations)
-    - [Components](#service-components)
+    - [Components](#components)
     - [Component-level Identities](#component-level-identities)
     - [Component-level Auth-to-local-properties](#component-level-auth-to-local-properties)
     - [Component-level Configurations](#component-level-configurations)
@@ -51,25 +51,15 @@
 
 ## The Kerberos Descriptor
 
-The Kerberos Descriptor is a JSON-formatted text file containing information needed by Ambari to enable
-or disable Kerberos for a stack and its services. This file must be named **_kerberos.json_** and should
-be in the root directory of the relevant stack or service definition. Kerberos Descriptors are meant to
-be hierarchical such that details in the stack-level descriptor can be overwritten (or updated) by details
-in the service-level descriptors.
+The Kerberos Descriptor is a JSON-formatted text file containing information needed by Ambari to enable or disable Kerberos for a stack and its services. This file must be named **_kerberos.json_** and should be in the root directory of the relevant stack or service definition. Kerberos Descriptors are meant to be hierarchical such that details in the stack-level descriptor can be overwritten (or updated) by details in the service-level descriptors.
 
-For the services in a stack to be Kerberized, there must be a stack-level Kerberos Descriptor. This
-ensures that even if a common service has a Kerberos Descriptor, it may not be Kerberized unless the
-relevant stack indicates that supports Kerberos by having a stack-level Kerberos Descriptor.
+For the services in a stack to be Kerberized, there must be a stack-level Kerberos Descriptor. This ensures that even if a common service has a Kerberos Descriptor, it may not be Kerberized unless the relevant stack indicates that it supports Kerberos by having a stack-level Kerberos Descriptor.
 
-For a component of a service to be Kerberized, there must be an entry for it in its containing service's
-service-level descriptor. This allows for some of a services' components to be managed and other
-components of that service to be ignored by the automated Kerberos facility.
+For a component of a service to be Kerberized, there must be an entry for it in its containing service's service-level descriptor. This allows for some of a services' components to be managed and other components of that service to be ignored by the automated Kerberos facility.
 
-Kerberos Descriptors are inherited from the base stack or service, but may be overridden as a full
-descriptor - partial descriptors are not allowed.
+Kerberos Descriptors are inherited from the base stack or service, but may be overridden as a full descriptor - partial descriptors are not allowed.
 
-A complete descriptor (which is built using the stack-level descriptor, the service-level descriptors,
-and any updates from user input) has the following structure:
+A complete descriptor (which is built using the stack-level descriptor, the service-level descriptors, and any updates from user input) has the following structure:
 
 - Stack-level Properties
 - Stack-level Identities
@@ -84,9 +74,7 @@
     - Component-level Auth-to-local-properties
     - Component-level Configurations
 
-Each level of the descriptor inherits the data from its parent. This data, however, may be overridden
-if necessary. For example, a component will inherit the configurations and identities of its container
-service; which in turn inherits the configurations and identities from the stack.
+Each level of the descriptor inherits the data from its parent. This data, however, may be overridden if necessary. For example, a component will inherit the configurations and identities of its container service; which in turn inherits the configurations and identities from the stack.
 
 <a name="components-of-a-kerberos-descriptor"></a>
 
@@ -96,13 +84,9 @@
 
 #### Stack-level Properties
 
-Stack-level properties is an optional set of name/value pairs that can be used in variable replacements.
-For example, if a property named "**_property1_**" exists with the value of "**_value1_**", then any instance of
-"**_${property1}_**" within a configuration property name or configuration property value will be replaced
-with "**_value1_**".
+Stack-level properties is an optional set of name/value pairs that can be used in variable replacements. For example, if a property named `**_property1_**` exists with the value of `**_value1_**`, then any instance of `**_${property1}_**` within a configuration property name or configuration property value will be replaced with `**_value1_**`.
 
-This property is only relevant in the stack-level Kerberos Descriptor and may not be overridden by
-lower-level descriptors.
+This property is only relevant in the stack-level Kerberos Descriptor and may not be overridden by lower-level descriptors.
 
 See [properties](#properties).
 
@@ -110,15 +94,7 @@
 
 #### Stack-level Identities
 
-Stack-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are common among all services in the stack. An example of such an identity is the
-Ambari smoke test user, which is used by all services to perform service check operations. Service-
-and component-level identities may reference (and specialize) stack-level identities using the
-identity’s name with a forward slash (/) preceding it. For example if there was a stack-level identity
-with the name "smokeuser", then a service or a component may create an identity block that references
-and specializes it by declaring a "**_reference_**" property and setting it to "/smokeuser".  Within
-this identity block details of the identity may be and overwritten as necessary. This does not alter
-the stack-level identity, it essentially creates a copy of it and updates the copy's properties.
+Stack-level identities is an optional identities block containing a list of zero or more identity descriptors that are common among all services in the stack. An example of such an identity is the Ambari smoke test user, which is used by all services to perform service check operations. Service- and component-level identities may reference (and specialize) stack-level identities using the identity’s name with a forward slash (/) preceding it. For example if there was a stack-level identity with the name "smokeuser", then a service or a component may create an identity block that references and specializes it by declaring a "**_reference_**" property and setting it to "/smokeuser". Within this identity block, details of the identity may be overwritten as necessary. This does not alter the stack-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 See [identities](#identities).
 
@@ -126,9 +102,7 @@
 
 #### Stack-level Auth-to-local-properties
 
-Stack-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Stack-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auto-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -136,11 +110,7 @@
 
 #### Stack-level Configurations
 
-Stack-level configurations is an optional configurations block containing a list of zero or more
-configuration descriptors that are common among all services in the stack. Configuration descriptors
-are overridable due to the structure of the data.  However, overriding configuration properties may
-create undesired behavior since it is not known until after the Kerberization process is complete
-what value a property will have.
+Stack-level configurations is an optional configurations block containing a list of zero or more configuration descriptors that are common among all services in the stack. Configuration descriptors are overridable due to the structure of the data. However, overriding configuration properties may create undesired behavior since it is not known until after the Kerberization process is complete what value a property will have.
 
 See [configurations](#configurations).
 
@@ -148,8 +118,7 @@
 
 #### Services
 
-Services is a list of zero or more service descriptors. A stack-level Kerberos Descriptor should not
-list any services; however a service-level Kerberos Descriptor should contain at least one.
+Services is a list of zero or more service descriptors. A stack-level Kerberos Descriptor should not list any services; however a service-level Kerberos Descriptor should contain at least one.
 
 See [services](#services).
 
@@ -157,16 +126,9 @@
 
 #### Service-level Identities
 
-Service-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are common among all components of the service. Component-level identities may
-reference (and specialize) service-level identities by specifying a relative or an absolute path
-to it.
+Service-level identities is an optional identities block containing a list of zero or more identity descriptors that are common among all components of the service. Component-level identities may reference (and specialize) service-level identities by specifying a relative or an absolute path to it.
 
-For example if there was a service-level identity with the name "service_identity", then a child
-component may create an identity block that references and specializes it by setting its "reference"
-attribute to "../service_identity" or "/service_name/service_identity" and overriding any values as
-necessary. This does not override the service-level identity, it essentially creates a copy of it and
-updates the copy's properties.
+For example if there was a service-level identity with the name "service_identity", then a child component may create an identity block that references and specializes it by setting its "reference" attribute to "../service_identity" or "/service_name/service_identity" and overriding any values as necessary. This does not override the service-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 ##### Examples
 
@@ -186,8 +148,7 @@
 }
 ```
 
-**Note**: By using the absolute path to an identity, any service-level identity may be referenced by
-any other service or component.
+**Note**: By using the absolute path to an identity, any service-level identity may be referenced by any other service or component.
 
 See [identities](#identities).
 
@@ -195,9 +156,7 @@
 
 #### Service-level Auth-to-local-properties
 
-Service-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Service-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auto-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -205,11 +164,7 @@
 
 #### Service-level Configurations
 
-Service-level configurations is an optional configurations block listing of zero or more configuration
-descriptors that are common among all components within a service. Configuration descriptors may be
-overridden due to the structure of the data. However, overriding configuration properties may create
-undesired behavior since it is not known until after the Kerberization process is complete what value
-a property will have.
+Service-level configurations is an optional configurations block listing of zero or more configuration descriptors that are common among all components within a service. Configuration descriptors may be overridden due to the structure of the data. However, overriding configuration properties may create undesired behavior since it is not known until after the Kerberization process is complete what value a property will have.
 
 See [configurations](#configurations).
 
@@ -225,11 +180,7 @@
 
 #### Component-level Identities
 
-Component-level identities is an optional identities block containing a list of zero or more identity
-descriptors that are specific to the component. A Component-level identity may be referenced
-(and specialized) by using the absolute path to it (`/service_name/component_name/identity_name`).
-This does not override the component-level identity, it essentially creates a copy of it and updates
-the copy's properties.
+Component-level identities is an optional identities block containing a list of zero or more identity descriptors that are specific to the component. A Component-level identity may be referenced (and specialized) by using the absolute path to it (`/service_name/component_name/identity_name`). This does not override the component-level identity, it essentially creates a copy of it and updates the copy's properties.
 
 See [identities](#identities).
 
@@ -237,9 +188,7 @@
 
 #### Component-level Auth-to-local-properties
 
-Component-level auth-to-local-properties is an optional list of zero or more configuration property
-specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should
-be updated with dynamically generated auto-to-local rule sets.
+Component-level auth-to-local-properties is an optional list of zero or more configuration property specifications `(config-type/property_name[|concatenation_scheme])` indicating which properties should be updated with dynamically generated auto-to-local rule sets.
 
 See [auth-to-local-properties](#auth-to-local-properties).
 
@@ -247,19 +196,17 @@
 
 #### Component-level Configurations
 
-Component-level configurations is an optional configurations block listing zero or more configuration
-descriptors that are specific to the component.
+Component-level configurations is an optional configurations block listing zero or more configuration descriptors that are specific to the component.
 
 See [configurations](#configurations).
 
-### Descriptor Specifications
+### Kerberos Descriptor Specifications
 
 <a name="properties"></a>
 
 #### properties
 
-The `properties` block is only valid in the service-level Kerberos Descriptor file. This block is
-a set of name/value pairs as follows:
+The `properties` block is only valid in the service-level Kerberos Descriptor file. This block is a set of name/value pairs as follows:
 
 ```
 "properties" : {
@@ -273,14 +220,9 @@
 
 #### auth-to-local-properties
 
-The `auth-to-local-properties` block is valid in the stack-, service-, and component-level
-descriptors. This block is a list of configuration specifications
-(`config-type/property_name[|concatenation_scheme]`) indicating which properties contain
-auth-to-local rules that should be dynamically updated based on the identities used within the
-Kerberized cluster.
+The `auth-to-local-properties` block is valid in the stack-, service-, and component-level descriptors. This block is a list of configuration specifications (`config-type/property_name[|concatenation_scheme]`) indicating which properties contain auth-to-local rules that should be dynamically updated based on the identities used within the Kerberized cluster.
 
-The specification optionally declares the concatenation scheme to use to append
-the rules into a rule set value. If specified one of the following schemes may be set:
+The specification optionally declares the concatenation scheme to use to append the rules into a rule set value. If specified one of the following schemes may be set:
 
 - **`new_lines`** - rules in the rule set are separated by a new line (`\n`)
 - **`new_lines_escaped`** - rules in the rule set are separated by a `\` and a new line (`\n`)
@@ -300,15 +242,9 @@
 
 #### configurations
 
-A `configurations` block may exist in stack-, service-, and component-level descriptors.
-This block is a list of one or more configuration blocks containing a single structure named using
-the configuration type and containing values for each relevant property.
+A `configurations` block may exist in stack-, service-, and component-level descriptors. This block is a list of one or more configuration blocks containing a single structure named using the configuration type and containing values for each relevant property.
 
-Each property name and value may be a concrete value or contain variables to be replaced using values
-from the stack-level `properties` block or any available configuration. Properties from the `properties`
-block are referenced by name (`${property_name}`), configuration properties are reference by
-configuration specification (`${config-type/property_name}`) and kerberos principals are referenced by the principal path
-(`principals/SERVICE/COMPONENT/principal_name`).
+Each property name and value may be a concrete value or contain variables to be replaced using values from the stack-level `properties` block or any available configuration. Properties from the `properties` block are referenced by name (`${property_name}`), configuration properties are referenced by configuration specification (`${config-type/property_name}`) and Kerberos principals are referenced by the principal path (`principals/SERVICE/COMPONENT/principal_name`).
 
 ```
 "configurations" : [
@@ -329,8 +265,7 @@
 ]
 ```
 
-If `cluster-env/smokuser` was `"ambari-qa"` and realm was `"EXAMPLE.COM"`, the above block would
-effectively be translated to
+If `cluster-env/smokuser` was `"ambari-qa"` and realm was `"EXAMPLE.COM"`, the above block would effectively be translated to
 
 ```
 "configurations" : [
@@ -355,25 +290,11 @@
 
 #### identities
 
-An `identities` descriptor may exist in stack-, service-, and component-level descriptors. This block
-is a list of zero or more identity descriptors. Each identity descriptor is a block containing a `name`,
-an optional `reference` identifier, an optional `principal` descriptor, and an optional `keytab`
-descriptor.
+An `identities` descriptor may exist in stack-, service-, and component-level descriptors. This block is a list of zero or more identity descriptors. Each identity descriptor is a block containing a `name`, an optional `reference` identifier, an optional `principal` descriptor, and an optional `keytab` descriptor.
 
-The `name` property of an `identity` descriptor should be a concrete name that is unique with in its
-`local` scope (stack, service, or component). However, to maintain backwards-compatibility with
-previous versions of Ambari, it may be a reference identifier to some other identity in the
-Kerberos Descriptor. This feature is deprecated and may not be available in future versions of Ambari.
+The `name` property of an `identity` descriptor should be a concrete name that is unique within its `local` scope (stack, service, or component). However, to maintain backwards-compatibility with previous versions of Ambari, it may be a reference identifier to some other identity in the Kerberos Descriptor. This feature is deprecated and may not be available in future versions of Ambari.
 
-The `reference` property of an `identitiy` descriptor is optional. If it exists, it indicates that the
-properties from referenced identity is to be used as the base for the current identity and any properties
-specified in the local identity block overrides the base data. In this scenario, the base data is copied
-to the local identities and therefore changes are realized locally, not globally. Referenced identities
-may be hierarchical, so a referenced identity may reference another identity, and so on.  Because of
-this, care must be taken not to create cyclic references. Reference values must be in the form of a
-relative or absolute _path_ to the referenced identity descriptor. Relative _paths_ start with a `../`
-and may be specified in component-level identity descriptors to reference an identity descriptor
-in the parent service. Absolute _paths_ start with a `/` and may be specified at any level as follows:
+The `reference` property of an `identity` descriptor is optional. If it exists, it indicates that the properties from the referenced identity are to be used as the base for the current identity and any properties specified in the local identity block override the base data. In this scenario, the base data is copied to the local identities and therefore changes are realized locally, not globally. Referenced identities may be hierarchical, so a referenced identity may reference another identity, and so on. Because of this, care must be taken not to create cyclic references. Reference values must be in the form of a relative or absolute _path_ to the referenced identity descriptor. Relative _paths_ start with a `../` and may be specified in component-level identity descriptors to reference an identity descriptor in the parent service. Absolute _paths_ start with a `/` and may be specified at any level as follows:
 
 - **Stack-level** identity reference: `/identitiy_name`
 - **Service-level** identity reference: `/SERVICE_NAME/identitiy_name`
@@ -407,31 +328,15 @@
 
 #### principal
 
-The `principal` block is an optional block inside an `identity` descriptor block. It declares the
-details about the identity’s principal, including the principal’s `value`, the `type` (user or service),
-the relevant `configuration` property, and a local username mapping. All properties are optional; however
-if no base or default value is available (via the parent identity's `reference` value) for all properties,
-the principal may be ignored.
+The `principal` block is an optional block inside an `identity` descriptor block. It declares the details about the identity’s principal, including the principal’s `value`, the `type` (user or service), the relevant `configuration` property, and a local username mapping. All properties are optional; however if no base or default value is available (via the parent identity's `reference` value) for all properties, the principal may be ignored.
 
-The `value` property of the principal is expected to be the normalized principal name, including the
-principal’s components and realm. In most cases, the realm should be specified using the realm variable
-(`${realm}` or `${kerberos-env/realm}`). Also, in the case of a service principal, "`_HOST`" should be
-used to represent the relevant hostname.  This value is typically replaced on the agent side by either
-the agent-side scripts or the services themselves to be the hostname of the current host. However the
-built-in hostname variable (`${hostname}`) may be used if "`_HOST`" replacement on the agent-side is
-not available for the service. Examples: `smokeuser@${realm}`, `service/_HOST@${realm}`.
+The `value` property of the principal is expected to be the normalized principal name, including the principal’s components and realm. In most cases, the realm should be specified using the realm variable (`${realm}` or `${kerberos-env/realm}`). Also, in the case of a service principal, "`_HOST`" should be used to represent the relevant hostname. This value is typically replaced on the agent side by either the agent-side scripts or the services themselves to be the hostname of the current host. However the built-in hostname variable (`${hostname}`) may be used if "`_HOST`" replacement on the agent-side is not available for the service. Examples: `smokeuser@${realm}`, `service/_HOST@${realm}`.
 
-The `type` property of the principal may be either `user` or `service`. If not specified, the type is
-assumed to be `user`. This value dictates how the identity is to be created in the KDC or Active Directory.
-It is especially important in the Active Directory case due to how accounts are created. It also,
-indicates to Ambari how to handle the principal and relevant keytab file reguarding the user interface
-behavior and data caching.
+The `type` property of the principal may be either `user` or `service`. If not specified, the type is assumed to be `user`. This value dictates how the identity is to be created in the KDC or Active Directory. It is especially important in the Active Directory case due to how accounts are created. It also indicates to Ambari how to handle the principal and relevant keytab file regarding the user interface behavior and data caching.
 
-The `configuration` property is an optional configuration specification (`config-type/property_name`)
-that is to be set to this principal's `value` (after its variables have been replaced).
+The `configuration` property is an optional configuration specification (`config-type/property_name`) that is to be set to this principal's `value` (after its variables have been replaced).
 
-The `local_username` property, if supplied, indicates which local user account to use when generating
-auth-to-local rules for this identity. If not specified, no explicit auth-to-local rule will be generated.
+The `local_username` property, if supplied, indicates which local user account to use when generating auth-to-local rules for this identity. If not specified, no explicit auth-to-local rule will be generated.
 
 ```
 "principal" : {
@@ -454,24 +359,15 @@
 
 #### keytab
 
-The `keytab` block is an optional block inside an `identity` descriptor block. It describes how to
-create and store the relevant keytab file.  This block declares the keytab file's path in the local
-filesystem of the destination host, the permissions to assign to that file, and the relevant
-configuration property.
+The `keytab` block is an optional block inside an `identity` descriptor block. It describes how to create and store the relevant keytab file. This block declares the keytab file's path in the local filesystem of the destination host, the permissions to assign to that file, and the relevant configuration property.
 
-The `file` property declares an absolute path to use to store the keytab file when distributing to
-relevant hosts. If this is not supplied, the keytab file will not be created.
+The `file` property declares an absolute path to use to store the keytab file when distributing to relevant hosts. If this is not supplied, the keytab file will not be created.
 
-The `owner` property is an optional block indicating the local user account to assign as the owner of
-the file and what access  (`"rw"` - read/write; `"r"` - read-only) should
-be granted to that user. By default, the owner will be given read-only access.
+The `owner` property is an optional block indicating the local user account to assign as the owner of the file and what access (`"rw"` - read/write; `"r"` - read-only) should be granted to that user. By default, the owner will be given read-only access.
 
-The `group` property is an optional block indicating which local group to assigned as the group owner
-of the file and what access (`"rw"` - read/write; `"r"` - read-only; `“”` - no access) should be granted
-to local user accounts in that group. By default, the group will be given no access.
+The `group` property is an optional block indicating which local group to assign as the group owner of the file and what access (`"rw"` - read/write; `"r"` - read-only; `“”` - no access) should be granted to local user accounts in that group. By default, the group will be given no access.
 
-The `configuration` property is an optional configuration specification (`config-type/property_name`)
-that is to be set to the path of this keytabs file (after any variables have been replaced).
+The `configuration` property is an optional configuration specification (`config-type/property_name`) that is to be set to the path of this keytabs file (after any variables have been replaced).
 
 ```
 "keytab" : {
@@ -492,11 +388,9 @@
 
 #### services
 
-A `services` block may exist in the stack-level and the service-level Kerberos Descriptor file.
-This block is a list of zero or more service descriptors to add to the Kerberos Descriptor.
+A `services` block may exist in the stack-level and the service-level Kerberos Descriptor file. This block is a list of zero or more service descriptors to add to the Kerberos Descriptor.
 
-Each service block contains a service `name`, and optionals `identities`,  `auth_to_local_properties`
-`configurations`, and `components` blocks.
+Each service block contains a service `name`, and optional `identities`, `auth_to_local_properties`, `configurations`, and `components` blocks.
 
 ```
 "services": [
@@ -538,10 +432,7 @@
 
 #### components
 
-A `components` block may exist within a `service` descriptor block. This block is a list of zero or
-more component descriptors belonging to the containing service descriptor. Each component descriptor
-is a block containing a component `name`, and optional `identities`, `auth_to_local_properties`,
-and `configurations` blocks.
+A `components` block may exist within a `service` descriptor block. This block is a list of zero or more component descriptors belonging to the containing service descriptor. Each component descriptor is a block containing a component `name`, and optional `identities`, `auth_to_local_properties`, and `configurations` blocks.
 
 ```
 "components": [
@@ -566,8 +457,8 @@
 ### Examples
 
 #### Example Stack-level Kerberos Descriptor
-The following example is annotated for descriptive purposes. The annotations are not valid in a real
-JSON-formatted file.
+
+The following example is annotated for descriptive purposes. The annotations are not valid in a real JSON-formatted file.
 
 ```
 {
@@ -661,8 +552,8 @@
 ```
 
 #### Example Service-level Kerberos Descriptor
-The following example is annotated for descriptive purposes. The annotations are not valid in a real
-JSON-formatted file.
+
+The following example is annotated for descriptive purposes. The annotations are not valid in a real JSON-formatted file.
 
 ```
 {
diff --git a/versioned_docs/version-3.0.0/ambari-design/stack-and-services/faq.md b/versioned_docs/version-3.0.0/ambari-design/stack-and-services/faq.md
index d19f838..06e68f2 100644
--- a/versioned_docs/version-3.0.0/ambari-design/stack-and-services/faq.md
+++ b/versioned_docs/version-3.0.0/ambari-design/stack-and-services/faq.md
@@ -6,18 +6,17 @@
 
 Ambari goes property by property and merge them from parent to child. So if you remove a category for example from the child it will be inherited from parent, that goes for pretty much all properties.
 
-So, the question is how do we tackle existence of a property in both parent and child. Here, most of the decision still follow same paradigm as take the child value instead of parent and every property in parent, not explicitly deleted from child using a marker like 
+So, the question is how we tackle the existence of a property in both parent and child. Here, most of the decisions still follow the same paradigm: take the child value instead of the parent's, and keep every property in the parent that is not explicitly deleted from the child using a marker like:
 
+- For config-dependencies, we take all or nothing approach, if this property exists in child use it and all of its children else take it from parent.
 
-* For config-dependencies, we take all or nothing approach, if this property exists in child use it and all of its children else take it from parent.
+- The custom commands are merged based on names, such that the merged definition is a union of commands, with child commands with the same name overriding those from the parent.
 
-* The custom commands are merged based on names, such that merged definition is a union of commands with child commands with same name overriding those fro parent.
-
-* Cardinality is overwritten by a child or take from the parent if child has not provided one.
+- Cardinality is overwritten by a child or taken from the parent if the child has not provided one.
 
 You could look at this method for more details: `org.apache.ambari.server.api.util.StackExtensionHelper#mergeServices`
 
-For more information see the [Service Inheritance](./custom-services.md#Service20%Inheritance) wiki page.
+For more information see the [Service Inheritance](./custom-services.md#service-inheritance) wiki page.
 
 **If a component is missing in the new definition but is present in the parent, does it get inherited?**
 
@@ -26,4 +25,3 @@
 **Configuration dependencies for the service -- are they overwritten or merged?**
 
 Overwritten.
-
diff --git a/versioned_docs/version-3.0.0/ambari-design/stack-and-services/stack-inheritance.md b/versioned_docs/version-3.0.0/ambari-design/stack-and-services/stack-inheritance.md
index 8d5184d..5fd3ffd 100644
--- a/versioned_docs/version-3.0.0/ambari-design/stack-and-services/stack-inheritance.md
+++ b/versioned_docs/version-3.0.0/ambari-design/stack-and-services/stack-inheritance.md
@@ -1,4 +1,3 @@
-
 # Stack Inheritance
 
 Each stack version must provide a metainfo.xml descriptor file which can declare whether the stack inherits from another stack version:
@@ -17,21 +16,21 @@
 
 The following files should not be redefined at the child stack version level:
 
-* properties/stack_features.json
-* properties/stack_tools.json
+- properties/stack_features.json
+- properties/stack_tools.json
 
 Note: These files should only exist at the base stack level.
 
 The following files if defined in the current stack version replace the definitions from the parent stack version:
 
-* kerberos.json
-* widgets.json
+- kerberos.json
+- widgets.json
 
 The following files if defined in the current stack version are merged with the parent stack version:
 
-* configuration/cluster-env.xml
+- configuration/cluster-env.xml
 
-* role_command_order.json
+- role_command_order.json
 
 Note: All the services' role command orders will be merge with the stack's role command order to provide a master list.
 
@@ -39,14 +38,14 @@
 
 The following directories if defined in the current stack version replace those from the parent stack version:
 
-* hooks
+- hooks
 
 This means the files included in those directories at the parent level will not be inherited. You will need to copy all the files you wish to keep from that directory structure.
 
 The following directories are not inherited:
 
-* repos
-* upgrades
+- repos
+- upgrades
 
 The repos/repoinfo.xml file should be defined in every stack version. The upgrades directory and its corresponding XML files should be defined in all stack versions that support upgrade.
 
@@ -59,10 +58,10 @@
       def __init__(self):
         super(HDP23StackAdvisor, self).__init__()
         Logger.initialize_logger()
- 
+
       def getComponentLayoutValidations(self, services, hosts):
         parentItems = super(HDP23StackAdvisor, self).getComponentLayoutValidations(services, hosts)
                  ...
 ```
 
-Services defined within the services folder follow the rules for [service inheritance](./custom-services.md#Service20%Inheritance). By default if a service does not declare an explicit inheritance (via the **extends** tag), the service will inherit from the service defined at the parent stack version.
+Services defined within the services folder follow the rules for [service inheritance](./custom-services.md#service-inheritance). By default if a service does not declare an explicit inheritance (via the **extends** tag), the service will inherit from the service defined at the parent stack version.
diff --git a/versioned_docs/version-3.0.0/ambari-design/views/index.md b/versioned_docs/version-3.0.0/ambari-design/views/index.md
index 4f047e6..ddd0ce9 100644
--- a/versioned_docs/version-3.0.0/ambari-design/views/index.md
+++ b/versioned_docs/version-3.0.0/ambari-design/views/index.md
@@ -1,33 +1,30 @@
 # Views
 
-:::info
-This capability is currently under development.
-:::info
+:::info This capability is currently under development. :::
 
 **Ambari Views** offer a systematic way to plug-in UI capabilities to surface custom visualization, management and monitoring features in Ambari Web. A " **view**" is a way of extending Ambari that allows 3rd parties to plug in new resource types along with the APIs, providers and UI to support them. In other words, a view is an application that is deployed into the Ambari container.
 
-
 ## Useful Resources
 
-Resource | Link
----------|-------
-Views Overview  | http://www.slideshare.net/hortonworks/ambari-views-overview
-Views Framework API Docs | https://github.com/apache/ambari/blob/trunk/ambari-views/docs/index.md
-Views Framework Examples | https://github.com/apache/ambari/tree/trunk/ambari-views/examples
+| Resource                 | Link                                                                   |
+| ------------------------ | ---------------------------------------------------------------------- |
+| Views Overview           | http://www.slideshare.net/hortonworks/ambari-views-overview            |
+| Views Framework API Docs | https://github.com/apache/ambari/blob/trunk/ambari-views/docs/index.md |
+| Views Framework Examples | https://github.com/apache/ambari/tree/trunk/ambari-views/examples      |
 
 ## Terminology
 
 The following section describes the basic terminology associated with views.
 
-Term | Description
----------|-------
-View Name     | The name of the view. The view name identifies the view to Ambari.
-View Version  | The version of the view. A unique view name can have multiple versions deployed in Ambari.
-View Package  | This is the JAR package that contains the **view definition** and all view resources (server-side resources and client-side assets) for a given view version. See [View Package](#View20%Package) for more information on the contents and structure of the package.
-View Definition | This defines the view name, version, resources and required/optional configuration parameters for a view. The view definition file is included in the view package. See View Definition for more information on the view definition file syntax and features.
-View Instance | An unique instance of a view, that is based on a view definition and specific version that is configured. See Versions and Instances for more information.
-View API  | The REST API for viewing the list of deployed views and creating view instances. See View API for more information.
-Framework Services | The server-side of the view framework exposes certain services for use with your views. This includes persistence of view instance data and view eventing. See Framework Services for more information.
+| Term | Description |
+| --- | --- |
+| View Name | The name of the view. The view name identifies the view to Ambari. |
+| View Version | The version of the view. A unique view name can have multiple versions deployed in Ambari. |
+| View Package | This is the JAR package that contains the **view definition** and all view resources (server-side resources and client-side assets) for a given view version. See [View Package](#view-package) for more information on the contents and structure of the package. |
+| View Definition | This defines the view name, version, resources and required/optional configuration parameters for a view. The view definition file is included in the view package. See View Definition for more information on the view definition file syntax and features. |
+| View Instance | An unique instance of a view, that is based on a view definition and specific version that is configured. See Versions and Instances for more information. |
+| View API | The REST API for viewing the list of deployed views and creating view instances. See View API for more information. |
+| Framework Services | The server-side of the view framework exposes certain services for use with your views. This includes persistence of view instance data and view eventing. See Framework Services for more information. |
 
 ## Components of a View
 
@@ -43,11 +40,9 @@
 
 A view can expose resources as REST end points to be used in conjunction with the client-side to deliver the functionality of your view application. Thees resources are written in Java and can be anything from a servlet to a regular REST service to an Ambari ResourceProvider (i.e. a special type of REST service that handles some REST capabilities such as partial response and pagination – if you adhere to the Ambari ResourceProvider interface). See [Framework Services](./framework-services.md) for more information on capabilities that the framework exposes on the server-side for views.
 
-:::info
-Checkout the **Weather View** as an example of a view that exposes servlet and REST endpoints.
+:::info Check out the **Weather View** as an example of a view that exposes servlet and REST endpoints.
 
-[https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view](https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view)
-:::
+[https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view](https://github.com/apache/ambari/tree/trunk/ambari-views/examples/weather-view) :::
 
 ## View Package
 
diff --git a/versioned_docs/version-3.0.0/ambari-dev/index.md b/versioned_docs/version-3.0.0/ambari-dev/index.md
index 3dba30d..fda2389 100644
--- a/versioned_docs/version-3.0.0/ambari-dev/index.md
+++ b/versioned_docs/version-3.0.0/ambari-dev/index.md
@@ -12,11 +12,9 @@
 
 Alternatively, you can easily launch a VM that is preconfigured with all the tools that you need. See the **Pre-Configured Development Environment** section in the [Quick Start Guide](../quick-start/quick-start-guide.md).
 
-* xCode (if using Mac - free download from the apple store)
-* JDK 8 (Ambari 2.6 and below can be compiled with JDK 7, from Ambari 2.7, it can be compiled with at least JDK 8)
-* [Apache Maven](http://maven.apache.org/download.html) 3.3.9 or later
-Tip:In order to persist your changes to the JAVA_HOME environment variable and add Maven to your path, create the following files:
-File: ~/.profile
+- xCode (if using Mac - free download from the apple store)
+- JDK 8 (Ambari 2.6 and below can be compiled with JDK 7, from Ambari 2.7, it can be compiled with at least JDK 8)
+- [Apache Maven](http://maven.apache.org/download.html) 3.3.9 or later Tip: In order to persist your changes to the JAVA_HOME environment variable and add Maven to your path, create the following files: File: ~/.profile
 
 ```bash
 source ~/.bashrc
@@ -30,10 +28,8 @@
 export _JAVA_OPTIONS="-Xmx2048m -XX:MaxPermSize=512m -Djava.awt.headless=true"
 ```
 
-
-* Python 2.6 (Ambari 2.7 or later require Python 2.7 as minimum supported version)
-* Python setuptools:
-for Python 2.6: D [ownload](http://pypi.python.org/packages/2.6/s/setuptools/setuptools-0.6c11-py2.6.egg#md5=bfa92100bd772d5a213eedd356d64086) setuptools and run:
+- Python 2.6 (Ambari 2.7 or later require Python 2.7 as minimum supported version)
+- Python setuptools: for Python 2.6: [Download](http://pypi.python.org/packages/2.6/s/setuptools/setuptools-0.6c11-py2.6.egg#md5=bfa92100bd772d5a213eedd356d64086) setuptools and run:
 
 ```bash
 sh setuptools-0.6c11-py2.6.egg
@@ -45,61 +41,53 @@
 sh setuptools-0.6c11-py2.7.egg
 ```
 
-
-* rpmbuild (rpm-build package)
-* g++ (gcc-c++ package)
+- rpmbuild (rpm-build package)
+- g++ (gcc-c++ package)
 
 ## Running Unit Tests
 
-* `mvn clean test`
-* Run unit tests in a single module:
+- `mvn clean test`
+- Run unit tests in a single module:
 
 ```bash
 mvn -pl ambari-server test
 ```
 
-
-* Run only Java tests:
+- Run only Java tests:
 
 ```bash
 mvn -pl ambari-server -DskipPythonTests
 ```
 
-
-* Run only specific Java tests:
+- Run only specific Java tests:
 
 ```bash
 mvn -pl ambari-server -DskipPythonTests -Dtest=AgentHostInfoTest test
 ```
 
-
-* Run only Python tests:
+- Run only Python tests:
 
 ```bash
 mvn -pl ambari-server -DskipSurefireTests test
 ```
 
-
-* Run only specific Python tests:
+- Run only specific Python tests:
 
 ```bash
 mvn -pl ambari-server -DskipSurefireTests -Dpython.test.mask=TestUtils.py test
 ```
 
-
-* Run only Checkstyle and RAT checks:
+- Run only Checkstyle and RAT checks:
 
 ```bash
 mvn -pl ambari-server -DskipTests test
 ```
 
-
-
 NOTE: Please make sure you have npm in the path before running the unit tests.
 
 ## Generating Findbugs Report
 
-* mvn clean install
+- mvn clean install
 
 This will generate xml and html report unders target/findbugs. You can also add flags to skip unit tests to generate report faster.
 
@@ -109,7 +97,7 @@
 
 To build Ambari RPMs, run the following.
 
-Note: Replace ${AMBARI_VERSION} with a 4-digit version you want the artifacts to be (e.g., -DnewVersion=1.6.1.1)
+Note: Replace `${AMBARI_VERSION}` with a 4-digit version you want the artifacts to be (e.g., -DnewVersion=1.6.1.1)
 
 **Note**: If running into errors while compiling the ambari-metrics package due to missing the artifacts of jms, jmxri, jmxtools:
 
@@ -134,14 +122,14 @@
 
 ## Setting the Version Using Maven
 
-Ambari 2.8+ uses a newer method to update the version when building Ambari. 
+Ambari 2.8+ uses a newer method to update the version when building Ambari.
 
 **RHEL/CentOS 6**:
 
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
- 
+
 mvn -B clean install package rpm:rpm -DskipTests -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
@@ -150,7 +138,7 @@
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
-  
+
 mvn -B clean install package rpm:rpm -DskipTests -Psuse11 -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
@@ -159,36 +147,35 @@
 ```
 # Update the revision property to the release version
 mvn versions:set-property -Dproperty=revision -DnewVersion=2.8.0.0.0
-  
+
 mvn -B clean install package jdeb:jdeb -DskipTests -Dpython.ver="python >= 2.6" -Preplaceurl
 ```
 
 Ambari Server will create following packages
 
-* RPM will be created under `AMBARI_DIR`/ambari-server/target/rpm/ambari-server/RPMS/noarch.
+- RPM will be created under `AMBARI_DIR`/ambari-server/target/rpm/ambari-server/RPMS/noarch.
 
-* DEB will be created under `AMBARI_DIR`/ambari-server/target/
+- DEB will be created under `AMBARI_DIR`/ambari-server/target/
 
 Ambari Agent will create following packages
 
-* RPM will be created under `AMBARI_DIR`/ambari-agent/target/rpm/ambari-agent/RPMS/x86_64.
+- RPM will be created under `AMBARI_DIR`/ambari-agent/target/rpm/ambari-agent/RPMS/x86_64.
 
-* DEB will be created under `AMBARI_DIR`/ambari-agent/target
+- DEB will be created under `AMBARI_DIR`/ambari-agent/target
 
 Optional parameters:
 
-* -X -e: add these options for more verbose output by Maven. Useful when debugging Maven issues.
+- -X -e: add these options for more verbose output by Maven. Useful when debugging Maven issues.
 
-* -DdefaultStackVersion=STACK-VERSION
-* Sets the default stack and version to be used for installation (e.g., -DdefaultStackVersion=HDP-1.3.0)
-* -DenableExperimental=true
-* Enables experimental features to be available via Ambari Web (default is false)
-* All views can be packaged in RPM by adding _-Dviews_ parameter
+- -DdefaultStackVersion=STACK-VERSION
+- Sets the default stack and version to be used for installation (e.g., -DdefaultStackVersion=HDP-1.3.0)
+- -DenableExperimental=true
+- Enables experimental features to be available via Ambari Web (default is false)
+- All views can be packaged in RPM by adding _-Dviews_ parameter
   - _mvn -B clean install package rpm:rpm -Dviews -DskipTests_
-* Specific views can be built by adding `--projects` parameter to the _-Dviews_
+- Specific views can be built by adding `--projects` parameter to the _-Dviews_
   - _mvn -B clean install package rpm:rpm --projects ambari-web,ambari-project,ambari-views,ambari-admin,contrib/views/files,contrib/views/pig,ambari-server,ambari-agent,ambari-client,ambari-shell -Dviews -DskipTests_
 
-
 _NOTE: Run everything as `root` below._
 
 ## Building Ambari Metrics
diff --git a/versioned_docs/version-3.0.0/ambari-dev/releasing-ambari.md b/versioned_docs/version-3.0.0/ambari-dev/releasing-ambari.md
index 4091c2c..ab7717d 100644
--- a/versioned_docs/version-3.0.0/ambari-dev/releasing-ambari.md
+++ b/versioned_docs/version-3.0.0/ambari-dev/releasing-ambari.md
@@ -4,13 +4,13 @@
 
 ### [Publishing Maven Artifacts](http://apache.org/dev/publishing-maven-artifacts.html)
 
-* Setting up release signing keys
-* Uploading artifacts to staging and release repositories
+- Setting up release signing keys
+- Uploading artifacts to staging and release repositories
 
 ### [Apache Release Guidelines](http://www.apache.org/legal/release-policy.html)
 
-* Release requirements
-* Process for staging
+- Release requirements
+- Process for staging
 
 ## Preparing for release
 
@@ -61,10 +61,10 @@
 Copy over {username}.asc to {username}@home.apache.org:public_html/~{username}.asc
 Verify URL http://home.apache.org/~{username}/{username}.asc
 Query PGP KeyServer http://pgp.mit.edu:11371/pks/lookup?search=0x{key}&op=vindex
-  
+
 Web of Trust:
 Request others to sign your PGP key.
- 
+
 Login at http://id.apache.org
 Add OpenPGP Fingerprint to your profile
 OpenPGP Public Key Primary Fingerprint: XXXX YYYY ZZZZ ....
@@ -94,7 +94,7 @@
 
 Create a branch for a release using branch-X.Y (ex: branch-2.1) as the name of the branch.
 
-Note: Going forward, we should be creating branch-{majorVersion}.{minorVersion}, so that the same branch can be used for maintenance releases.
+Note: Going forward, we should be creating branch-[majorVersion].[minorVersion], so that the same branch can be used for maintenance releases.
 
 **Checkout the release branch**
 
@@ -145,9 +145,8 @@
 # Review and commit the changes to branch-X.Y
 git commit
 ```
-:::danger
-Ambari 2.7 and Earlier Releases (Deprecated)
-:::
+
+:::danger Ambari 2.7 and Earlier Releases (Deprecated) :::
 
 Older Ambari branches still required that you update every `pom.xml` manually through the below process:
 
@@ -216,7 +215,7 @@
 
 **Setup Build**
 
-Setup Jenkins Job for the new branch on http://builds.apache.org 
+Setup Jenkins Job for the new branch on http://builds.apache.org
 
 ## Creating Release Candidate
 
@@ -294,9 +293,9 @@
 
 **Call for a vote on the dev@ambari.apache.org mailing list with something like this:**
 
-I have created an ambari-** release candidate.
+I have created an ambari-\*\* release candidate.
 
-GIT source tag (r***)
+GIT source tag (r\*\*\*)
 
 ```
 https://git-wip-us.apache.org/repos/asf/ambari/repo?p=ambari.git;a=log;h=refs/tags/release-x.y.z-rc0
@@ -316,8 +315,8 @@
 
 ## Publishing and Announcement
 
-* Login to [https://id.apache.org](https://id.apache.org) and verify the fingerprint of PGP key used to sign above is provided. (gpg --fingerprint)
-* Upload your PGP public key only to _/home/_
+- Login to [https://id.apache.org](https://id.apache.org) and verify the fingerprint of PGP key used to sign above is provided. (gpg --fingerprint)
+- Upload your PGP public key only to _/home/_
 
 Publish the release as below:
 
@@ -388,9 +387,9 @@
 
 - Login to https://reporter.apache.org/addrelease.html?ambari with apache credentials.
 - Fill out the fields:
-    - Committe: ambari
-    - Full version name: 2.2.0
-    - Date of release (YYYY-MM-DD):  2015-12-19
+  - Committee: ambari
+  - Full version name: 2.2.0
+  - Date of release (YYYY-MM-DD): 2015-12-19
 - Submit the data
 - Verify that the submitted data is reflected at https://reporter.apache.org/?ambari
 
@@ -398,4 +397,4 @@
 
 ## Publish Ambari artifacts to Maven central
 
-Please use the following [document](https://docs.google.com/document/d/1RjWQOaTUne6t8DPJorPhOMWAfOb6Xou6sAdHk96CHDw/edit) to publish Ambari artifacts to Maven central.  
+Please use the following [document](https://docs.google.com/document/d/1RjWQOaTUne6t8DPJorPhOMWAfOb6Xou6sAdHk96CHDw/edit) to publish Ambari artifacts to Maven central.
diff --git a/versioned_docs/version-3.0.0/ambari-plugin-contribution/scom/installation.md b/versioned_docs/version-3.0.0/ambari-plugin-contribution/scom/installation.md
index 6032c1d..25ca698 100644
--- a/versioned_docs/version-3.0.0/ambari-plugin-contribution/scom/installation.md
+++ b/versioned_docs/version-3.0.0/ambari-plugin-contribution/scom/installation.md
@@ -1,18 +1,18 @@
 # Installation
 
-## Prerequisite Software 
+## Prerequisite Software
 
 Setting up Ambari SCOM assumes the following prerequisite software:
 
-* Ambari SCOM 1.0
+- Ambari SCOM 1.0
   - Apache Hadoop 1.x cluster (HDFS and MapReduce) 1
-* Ambari SCOM 2.0
+- Ambari SCOM 2.0
   - Apache Hadoop 2.x cluster (HDFS and YARN/MapReduce) 2
-* JDK 1.7
-* Microsoft SQL Server 2012
-* Microsoft JDBC Driver 4.0 for SQL Server 3
-* Microsoft System Center Operations Manager (SCOM) 2012 SP1 or later
-* System Center Monitoring Agent installed on **Watcher Node** 4
+- JDK 1.7
+- Microsoft SQL Server 2012
+- Microsoft JDBC Driver 4.0 for SQL Server 3
+- Microsoft System Center Operations Manager (SCOM) 2012 SP1 or later
+- System Center Monitoring Agent installed on **Watcher Node** 4
 
 1 _Ambari SCOM_ 1.0 has been tested with a Hadoop cluster based on **Hortonworks Data Platform 1.3 for Windows** ("[HDP 1.3 for Windows](http://hortonworks.com/products/releases/hdp-1-3-for-windows/)")
 
@@ -22,7 +22,7 @@
 
 4 See Microsoft TechNet topic for [Managing Discovery and Agents](http://technet.microsoft.com/en-us/library/hh212772.aspx). Minimum Agent requirements _.NET 4_ and _PowerShell 2.0 + 3.0_
 
-## Package Contents 
+## Package Contents
 
 ```
 ├─ ambari-scom- _**version**_.zip
@@ -33,25 +33,23 @@
 └── ambari-scom.msi
 ```
 
-File | Name | Description
------|------|-------------
-server.zip | Server Package | Contains the required software for configuring the Ambari SCOM Server software. 
-metrics-sink.zip | Metrics Sink Package | Contains the required software for manually configuring SQL Server and the Hadoop Metrics Sink.
-ambari-scom.msi | MSI Installer | The Ambari SCOM MSI Installer for configuring the Ambari SCOM Server and Hadoop Metrics Sink
-mp.zip | Management Pack Package | Contains the Ambari SCOM Management Pack software.
+| File | Name | Description |
+| --- | --- | --- |
+| server.zip | Server Package | Contains the required software for configuring the Ambari SCOM Server software. |
+| metrics-sink.zip | Metrics Sink Package | Contains the required software for manually configuring SQL Server and the Hadoop Metrics Sink. |
+| ambari-scom.msi | MSI Installer | The Ambari SCOM MSI Installer for configuring the Ambari SCOM Server and Hadoop Metrics Sink |
+| mp.zip | Management Pack Package | Contains the Ambari SCOM Management Pack software. |
 
 ## Ambari SCOM Server Installation
 
-:::caution
-The **Ambari SCOM Management Pack** must connect to an Ambari SCOM Server to retrieve cluster metrics. Therefore, you need to have an Ambari SCOM Server running in your cluster. If you have already installed your Hadoop cluster (including the Ganglia Service) with Ambari (minimum **Ambari 1.5.1 for SCOM 2.0.0**) and have an Ambari Server already running + managing your Hadoop 1.x cluster, you can use that Ambari Server and point the **Management Pack** that host. You can proceed directly to [Installing Ambari SCOM Management Pack](#id-2installation-mgmtpack) and skip these steps to install an Ambari SCOM Server. If you do not have an Ambari Server running + managing your cluster, you **must** install an Ambari SCOM Server using one of the methods described below.
-:::
+:::caution The **Ambari SCOM Management Pack** must connect to an Ambari SCOM Server to retrieve cluster metrics. Therefore, you need to have an Ambari SCOM Server running in your cluster. If you have already installed your Hadoop cluster (including the Ganglia Service) with Ambari (minimum **Ambari 1.5.1 for SCOM 2.0.0**) and have an Ambari Server already running + managing your Hadoop 1.x cluster, you can use that Ambari Server and point the **Management Pack** to that host. You can proceed directly to [Installing Ambari SCOM Management Pack](#installing-ambari-scom-management-pack) and skip these steps to install an Ambari SCOM Server. If you do not have an Ambari Server running + managing your cluster, you **must** install an Ambari SCOM Server using one of the methods described below. :::
 
 The following methods are available for installing Ambari SCOM Server:
 
-* **Manual Installation** - This installation method requires you to configure the SQL Server database, setup the Ambari SCOM Server and configure the Hadoop Metrics Sink. This provides the most flexible install option based on your environment.
-* **MSI Installation** - This installation method installs the Ambari SCOM Server and configures the Hadoop Metrics Sink on all hosts in the cluster automatically using an MSI Installer. After launching the MSI, you provide information about your SQL Server database and the cluster for the installer to handle configuration. 
+- **Manual Installation** - This installation method requires you to configure the SQL Server database, setup the Ambari SCOM Server and configure the Hadoop Metrics Sink. This provides the most flexible install option based on your environment.
+- **MSI Installation** - This installation method installs the Ambari SCOM Server and configures the Hadoop Metrics Sink on all hosts in the cluster automatically using an MSI Installer. After launching the MSI, you provide information about your SQL Server database and the cluster for the installer to handle configuration.
 
-## Manual Installation 
+## Manual Installation
 
 ### Configuring SQL Server
 
@@ -63,11 +61,9 @@
 
 5. Create the Ambari SCOM database schema by running the `Hadoop-Metrics-SQLServer-CREATE.ddl` script.
 
-:::info
-The Hadoop Metrics DDL script will create a database called "HadoopMetrics".
-:::
+:::info The Hadoop Metrics DDL script will create a database called "HadoopMetrics". :::
 
-### Configuring Hadoop Metrics Sink 
+### Configuring Hadoop Metrics Sink
 
 #### Preparing the Metrics Sink
 
@@ -75,8 +71,7 @@
 
 2. Obtain the _Microsoft JDBC Driver 4.0 for SQL Server_ `sqljdbc4.jar` file.
 
-3. Copy `sqljdbc4.jar` and `metrics-sink-version.jar` to each host in the cluster. For example, copy to `C:\Ambari\metrics-sink-version.jar` and `C:\Ambari\sqljdbc4.jar`
-on each host.
+3. Copy `sqljdbc4.jar` and `metrics-sink-version.jar` to each host in the cluster. For example, copy to `C:\Ambari\metrics-sink-version.jar` and `C:\Ambari\sqljdbc4.jar` on each host.
 
 #### Setup Hadoop Metrics2 Interface
 
@@ -95,30 +90,27 @@
 reducetask.sink.sql.databaseUrl=jdbc:sqlserver://[server]:[port];databaseName=HadoopMetrics;user=[user];password=[password]
 ```
 
-:::info
-_Where:_
+:::info _Where:_
 
-* _server = the SQL Server hostname_
-* _port = the SQL Server port (for example, 1433)_
-* _user = the SQL Server user (for example, sa)_
-* _password = the SQL Server password (for example, BigData1)_
-:::
+- _server = the SQL Server hostname_
+- _port = the SQL Server port (for example, 1433)_
+- _user = the SQL Server user (for example, sa)_
+- _password = the SQL Server password (for example, BigData1)_ :::
 
 1. Update the Java classpath for each Hadoop service to include the `metrics-sink-<strong><em>version</em></strong>.jar` and `sqljdbc4.jar` files.
 
+   - Example: Updating the Java classpath for _HDP for Windows_ clusters
 
-    - Example: Updating the Java classpath for _HDP for Windows_ clusters
+     The `service.xml` files will be located in the `C:\hadoop\install\dir\bin` folder of each host in the cluster. The Java classpath is specified for each service in the `<arguments>` element of the `service.xml` file. For example, to update the Java classpath for the `NameNode` component, edit the `C:\hadoop\bin\namenode.xml` file.
 
-      The `service.xml` files will be located in the `C:\hadoop\install\dir\bin` folder of each host in the cluster. The Java classpath is specified for each service in the `<arguments>` element of the `service.xml` file. For example, to update the Java classpath for the `NameNode` component, edit the `C:\hadoop\bin\namenode.xml` file.
+     ```
+     ...
 
-        ```
-        ...
-        
-        ... -classpath ...;C:\Ambari\metrics-sink-1.5.1.2.0.0.0-673.jar;C:\Ambari\sqljdbc4.jar ...
-        
-        ...
-        
-        ```
+     ... -classpath ...;C:\Ambari\metrics-sink-1.5.1.2.0.0.0-673.jar;C:\Ambari\sqljdbc4.jar ...
+
+     ...
+
+     ```
 
 2. Restart Hadoop for these changes to take affect.
 
@@ -129,9 +121,8 @@
 ```sql
 select * from HadoopMetrics.dbo.MetricRecord
 ```
-:::info
-In the above SQL statement, `HadoopMetrics` is the database name.
-:::
+
+:::info In the above SQL statement, `HadoopMetrics` is the database name. :::
 
 ### Installing and Configuring Ambari SCOM Server
 
@@ -158,34 +149,30 @@
 scom.sink.db.url=jdbc:sqlserver://[server]:[port];databaseName=HadoopMetrics;user=[user];password=[password]
 ```
 
-:::info
-_Where:_
-  - _server = the SQL Server hostname_
-  - _port = the SQL Server port (for example, 1433)_
-  - _user = the SQL Server user (for example, sa)_
-  - _password = the SQL Server password (for example, BigData1)_
-:::
+:::info _Where:_
 
-6. Run the `org.apache.ambari.scom.AmbariServer` class from the Java command line to start the Ambari SCOM Server. 
+- _server = the SQL Server hostname_
+- _port = the SQL Server port (for example, 1433)_
+- _user = the SQL Server user (for example, sa)_
+- _password = the SQL Server password (for example, BigData1)_ :::
 
-:::info
-Be sure to include the following in the classpath:
-  - `ambari-scom-server-version.jar` file
-  - configuration folder containing the Ambari SCOM configuration files
-  - lib folder containing the Ambari SCOM dependencies
-  - folder containing the `clusterproperties.txt` file from the Hadoop install. For example, `c:\hadoop\install\dir`
-  - `sqljdbc4.jar` SQLServer JDBC Driver file
-::
-  
+6. Run the `org.apache.ambari.scom.AmbariServer` class from the Java command line to start the Ambari SCOM Server.
+
+:::info Be sure to include the following in the classpath:
+
+- `ambari-scom-server-version.jar` file
+- configuration folder containing the Ambari SCOM configuration files
+- lib folder containing the Ambari SCOM dependencies
+- folder containing the `clusterproperties.txt` file from the Hadoop install. For example, `c:\hadoop\install\dir`
+- `sqljdbc4.jar` SQLServer JDBC Driver file :::
+
 For example:
 
 ```bash
 java -server -XX:NewRatio=3 -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -XX:CMSInitiatingOccupancyFraction=60 -Xms512m -Xmx2048m -cp "c:\ambari-scom\server\conf;c:\ambari-scom\server\lib\*;c:\jdbc\sqljdbc4.jar;c:\hadoop\install\dir;c:\ambari-scom\server\ambari-scom-server-1.5.1.2.0.0.0-673.jar" org.apache.ambari.scom.AmbariServer
 ```
 
-:::info
-In the above command, be sure to replace the Ambari SCOM version in the `ambari-scom-server-version.jar` and replace `c:\hadoop\install\dir` with the folder containing the `clusterproperties.txt` file.
-:::
+:::info In the above command, be sure to replace the Ambari SCOM version in the `ambari-scom-server-version.jar` and replace `c:\hadoop\install\dir` with the folder containing the `clusterproperties.txt` file. :::
 
 #### Verify the Server API
 
@@ -194,6 +181,7 @@
 ```
 http://[ambari-scom-server]:8080/api/v1/clusters
 ```
+
 2. Verify that metrics are being reported.
 
 ```
@@ -217,33 +205,29 @@
 
 3. Run the `ambari-scom.msi` installer. The "Ambari SCOM Setup" dialog appears:
 
-     ![](./imgs/ambari-scom-msi2.png)
+   ![](./imgs/ambari-scom-msi2.png)
 
-4. Provide the following information: 
+4. Provide the following information:
 
-Field | Description
-------|------------
-Ambari SCOM package directory | The directory where the installer will place the Ambari SCOM Server packages. For example: C:\Ambari
-SQL Server hostname | The hostname of the SQL Server instance for Ambari SCOM Server to use to store Hadoop metrics.
-SQL Server port | The port of the SQL Server instance.
-SQL Server login | The login username.
-SQL Server password | The login password
-Path to SQL Server JDBC Driver (sqljdbc4.jar) | The path to the JDBC Driver JAR file.
-Path to the cluster layout file (clusterproperties.txt) | The path to the cluster layout properties file.
+| Field | Description |
+| --- | --- |
+| Ambari SCOM package directory | The directory where the installer will place the Ambari SCOM Server packages. For example: C:\Ambari |
+| SQL Server hostname | The hostname of the SQL Server instance for Ambari SCOM Server to use to store Hadoop metrics. |
+| SQL Server port | The port of the SQL Server instance. |
+| SQL Server login | The login username. |
+| SQL Server password | The login password |
+| Path to SQL Server JDBC Driver (sqljdbc4.jar) | The path to the JDBC Driver JAR file. |
+| Path to the cluster layout file (clusterproperties.txt) | The path to the cluster layout properties file. |
 
 5. You can optionally select to Start Services
 6. Click Install
 7. After completion, links are created on the desktop to "Start Ambari SCOM Server", "Browse Ambari API" and "Browse Ambari API Metrics". After starting the Ambari SCOM Server, browse the API and Metrics to confirm the server is working properly.
 
-:::info
-The MSI installer installation log can be found at `C:\AmbariInstallFiles\AmbariSetupTools\ambari.winpkg.install.log`
-:::
+:::info The MSI installer installation log can be found at `C:\AmbariInstallFiles\AmbariSetupTools\ambari.winpkg.install.log` :::
 
 ### Installing Ambari SCOM Management Pack
 
-:::info
-Before installing the Management pack, be sure to install the Ambari SCOM Server using the Ambari SCOM Server Installation instructions.
-:::
+:::info Before installing the Management pack, be sure to install the Ambari SCOM Server using the Ambari SCOM Server Installation instructions. :::
 
 #### Import the Management Pack
 
@@ -272,14 +256,13 @@
 Ambari.SCOM.Management.mpb
 Ambari.SCOM.Presentation.mpb
 ```
+
 10. Click "Open"
 11. Review the Import list and click "Install".
 
 12. The Ambari SCOM Management Pack installation will start.
 
-:::info
-The Ambari SCOM package also includes `AmbariSCOMManagementPack.msi` which is an alternative packaging of the `mp.zip`. This MSI is being made in **beta** form in this release.
-:::
+:::info The Ambari SCOM package also includes `AmbariSCOMManagementPack.msi` which is an alternative packaging of the `mp.zip`. This MSI is being made in **beta** form in this release. :::
 
 #### Create Run As Account
 
@@ -317,9 +300,7 @@
 http://[ambari-scom-server]:8080/api/
 ```
 
-:::info
-In the above Ambari URI, `ambari-scom-server` is the Ambari SCOM Server.
-:::
+:::info In the above Ambari URI, `ambari-scom-server` is the Ambari SCOM Server. :::
 
 6. Select the Run As Account that you created in Create Run As Account.
 
@@ -337,4 +318,4 @@
 
 ## Monitoring Scenarios
 
-[Monitoring Scenarios](https://cwiki.apache.org/confluence/display/AMBARI/3.+Monitoring+Scenarios)
\ No newline at end of file
+[Monitoring Scenarios](https://cwiki.apache.org/confluence/display/AMBARI/3.+Monitoring+Scenarios)
diff --git a/yarn.lock b/yarn.lock
index b6ea173..37fe60a 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1987,14 +1987,6 @@
   resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz"
   integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
 
-"@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25":
-  version "0.3.25"
-  resolved "https://registry.npmmirror.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz"
-  integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==
-  dependencies:
-    "@jridgewell/resolve-uri" "^3.1.0"
-    "@jridgewell/sourcemap-codec" "^1.4.14"
-
 "@jridgewell/trace-mapping@0.3.9":
   version "0.3.9"
   resolved "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz"
@@ -2003,6 +1995,14 @@
     "@jridgewell/resolve-uri" "^3.0.3"
     "@jridgewell/sourcemap-codec" "^1.4.10"
 
+"@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25":
+  version "0.3.25"
+  resolved "https://registry.npmmirror.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz"
+  integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==
+  dependencies:
+    "@jridgewell/resolve-uri" "^3.1.0"
+    "@jridgewell/sourcemap-codec" "^1.4.14"
+
 "@leichtgewicht/ip-codec@^2.0.1":
   version "2.0.5"
   resolved "https://registry.npmmirror.com/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz"
@@ -2053,7 +2053,7 @@
     "@nodelib/fs.stat" "2.0.5"
     run-parallel "^1.1.9"
 
-"@nodelib/fs.stat@^2.0.2", "@nodelib/fs.stat@2.0.5":
+"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2":
   version "2.0.5"
   resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz"
   integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==
@@ -2634,7 +2634,7 @@
   resolved "https://registry.npmmirror.com/@ungap/structured-clone/-/structured-clone-1.3.0.tgz"
   integrity sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==
 
-"@webassemblyjs/ast@^1.14.1", "@webassemblyjs/ast@1.14.1":
+"@webassemblyjs/ast@1.14.1", "@webassemblyjs/ast@^1.14.1":
   version "1.14.1"
   resolved "https://registry.npmmirror.com/@webassemblyjs/ast/-/ast-1.14.1.tgz"
   integrity sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==
@@ -2735,7 +2735,7 @@
     "@webassemblyjs/wasm-gen" "1.14.1"
     "@webassemblyjs/wasm-parser" "1.14.1"
 
-"@webassemblyjs/wasm-parser@^1.14.1", "@webassemblyjs/wasm-parser@1.14.1":
+"@webassemblyjs/wasm-parser@1.14.1", "@webassemblyjs/wasm-parser@^1.14.1":
   version "1.14.1"
   resolved "https://registry.npmmirror.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz"
   integrity sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==
@@ -2765,6 +2765,14 @@
   resolved "https://registry.npmmirror.com/@xtuc/long/-/long-4.2.2.tgz"
   integrity sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==
 
+JSONStream@^1.0.4:
+  version "1.3.5"
+  resolved "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz"
+  integrity sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==
+  dependencies:
+    jsonparse "^1.2.0"
+    through ">=2.2.7 <3"
+
 accepts@~1.3.4, accepts@~1.3.8:
   version "1.3.8"
   resolved "https://registry.npmmirror.com/accepts/-/accepts-1.3.8.tgz"
@@ -2835,7 +2843,7 @@
     json-schema-traverse "^0.4.1"
     uri-js "^4.2.2"
 
-ajv@^8.0.0:
+ajv@^8.0.0, ajv@^8.9.0:
   version "8.17.1"
   resolved "https://registry.npmmirror.com/ajv/-/ajv-8.17.1.tgz"
   integrity sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==
@@ -2855,16 +2863,6 @@
     require-from-string "^2.0.2"
     uri-js "^4.2.2"
 
-ajv@^8.9.0:
-  version "8.17.1"
-  resolved "https://registry.npmmirror.com/ajv/-/ajv-8.17.1.tgz"
-  integrity sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==
-  dependencies:
-    fast-deep-equal "^3.1.3"
-    fast-uri "^3.0.1"
-    json-schema-traverse "^1.0.0"
-    require-from-string "^2.0.2"
-
 algoliasearch-helper@^3.22.6:
   version "3.24.2"
   resolved "https://registry.npmmirror.com/algoliasearch-helper/-/algoliasearch-helper-3.24.2.tgz"
@@ -3302,16 +3300,7 @@
   resolved "https://registry.npmmirror.com/ccount/-/ccount-2.0.1.tgz"
   integrity sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==
 
-chalk@^2.4.1:
-  version "2.4.2"
-  resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz"
-  integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
-  dependencies:
-    ansi-styles "^3.2.1"
-    escape-string-regexp "^1.0.5"
-    supports-color "^5.3.0"
-
-chalk@^2.4.2:
+chalk@^2.4.1, chalk@^2.4.2:
   version "2.4.2"
   resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz"
   integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
@@ -3503,16 +3492,16 @@
   dependencies:
     color-name "~1.1.4"
 
-color-name@~1.1.4:
-  version "1.1.4"
-  resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz"
-  integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
-
 color-name@1.1.3:
   version "1.1.3"
   resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz"
   integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==
 
+color-name@~1.1.4:
+  version "1.1.4"
+  resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz"
+  integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
+
 colord@^2.9.3:
   version "2.9.3"
   resolved "https://registry.npmmirror.com/colord/-/colord-2.9.3.tgz"
@@ -3699,7 +3688,7 @@
   resolved "https://registry.npmjs.org/conventional-changelog-config-spec/-/conventional-changelog-config-spec-2.1.0.tgz"
   integrity sha512-IpVePh16EbbB02V+UA+HQnnPIohgXvJRxHcS5+Uwk4AT5LjzCZJm5sp/yqs5C6KZJ1jMsV4paEV13BN1pvDuxQ==
 
-conventional-changelog-conventionalcommits@^4.5.0, conventional-changelog-conventionalcommits@4.6.3:
+conventional-changelog-conventionalcommits@4.6.3, conventional-changelog-conventionalcommits@^4.5.0:
   version "4.6.3"
   resolved "https://registry.npmjs.org/conventional-changelog-conventionalcommits/-/conventional-changelog-conventionalcommits-4.6.3.tgz"
   integrity sha512-LTTQV4fwOM4oLPad317V/QNQ1FY4Hju5qeBIM1uTHbrnCE+Eg4CdRZ3gO2pUeR+tzWdp80M2j3qFFEDWVqOV4g==
@@ -3826,8 +3815,8 @@
   resolved "https://registry.npmjs.org/conventional-commits-parser/-/conventional-commits-parser-3.2.4.tgz"
   integrity sha512-nK7sAtfi+QXbxHCYfhpZsfRtaitZLIA6889kFIouLvz6repszQDgxBu7wf2WbU+Dco7sAnNCJYERCwt54WPC2Q==
   dependencies:
-    is-text-path "^1.0.1"
     JSONStream "^1.0.4"
+    is-text-path "^1.0.1"
     lodash "^4.17.15"
     meow "^8.0.0"
     split2 "^3.0.0"
@@ -3935,17 +3924,7 @@
     path-type "^4.0.0"
     yaml "^1.10.0"
 
-cosmiconfig@^8.1.3:
-  version "8.3.6"
-  resolved "https://registry.npmmirror.com/cosmiconfig/-/cosmiconfig-8.3.6.tgz"
-  integrity sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==
-  dependencies:
-    import-fresh "^3.3.0"
-    js-yaml "^4.1.0"
-    parse-json "^5.2.0"
-    path-type "^4.0.0"
-
-cosmiconfig@^8.3.5:
+cosmiconfig@^8.1.3, cosmiconfig@^8.3.5:
   version "8.3.6"
   resolved "https://registry.npmmirror.com/cosmiconfig/-/cosmiconfig-8.3.6.tgz"
   integrity sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==
@@ -4155,7 +4134,7 @@
   resolved "https://registry.npmjs.org/csstype/-/csstype-3.1.1.tgz"
   integrity sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw==
 
-cz-conventional-changelog@^3.3.0, cz-conventional-changelog@3.3.0:
+cz-conventional-changelog@3.3.0, cz-conventional-changelog@^3.3.0:
   version "3.3.0"
   resolved "https://registry.npmjs.org/cz-conventional-changelog/-/cz-conventional-changelog-3.3.0.tgz"
   integrity sha512-U466fIzU5U22eES5lTNiNbZ+d8dfcHcssH4o7QsdWaCcRs/feIPCxKYSWkYBNs5mny7MvEfwpTLWjvbm94hecw==
@@ -4184,13 +4163,20 @@
   resolved "https://registry.npmmirror.com/debounce/-/debounce-1.2.1.tgz"
   integrity sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==
 
-debug@^2.6.0, debug@2.6.9:
+debug@2.6.9, debug@^2.6.0:
   version "2.6.9"
   resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz"
   integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
   dependencies:
     ms "2.0.0"
 
+debug@4, debug@^4.0.0, debug@^4.1.0, debug@^4.1.1:
+  version "4.4.0"
+  resolved "https://registry.npmmirror.com/debug/-/debug-4.4.0.tgz"
+  integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==
+  dependencies:
+    ms "^2.1.3"
+
 debug@^3.2.6:
   version "3.2.7"
   resolved "https://mirrors.cloud.tencent.com/npm/debug/-/debug-3.2.7.tgz"
@@ -4198,27 +4184,6 @@
   dependencies:
     ms "^2.1.1"
 
-debug@^4.0.0:
-  version "4.4.0"
-  resolved "https://registry.npmmirror.com/debug/-/debug-4.4.0.tgz"
-  integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==
-  dependencies:
-    ms "^2.1.3"
-
-debug@^4.1.0:
-  version "4.4.0"
-  resolved "https://registry.npmmirror.com/debug/-/debug-4.4.0.tgz"
-  integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==
-  dependencies:
-    ms "^2.1.3"
-
-debug@^4.1.1:
-  version "4.4.0"
-  resolved "https://registry.npmmirror.com/debug/-/debug-4.4.0.tgz"
-  integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==
-  dependencies:
-    ms "^2.1.3"
-
 debug@^4.3.1:
   version "4.3.4"
   resolved "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz"
@@ -4226,13 +4191,6 @@
   dependencies:
     ms "2.1.2"
 
-debug@4:
-  version "4.4.0"
-  resolved "https://registry.npmmirror.com/debug/-/debug-4.4.0.tgz"
-  integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==
-  dependencies:
-    ms "^2.1.3"
-
 decamelize-keys@^1.1.0:
   version "1.1.0"
   resolved "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.0.tgz"
@@ -4331,16 +4289,16 @@
     rimraf "^3.0.2"
     slash "^3.0.0"
 
-depd@~1.1.2:
-  version "1.1.2"
-  resolved "https://registry.npmmirror.com/depd/-/depd-1.1.2.tgz"
-  integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==
-
 depd@2.0.0:
   version "2.0.0"
   resolved "https://registry.npmmirror.com/depd/-/depd-2.0.0.tgz"
   integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==
 
+depd@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.npmmirror.com/depd/-/depd-1.1.2.tgz"
+  integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==
+
 dequal@^2.0.0:
   version "2.0.3"
   resolved "https://registry.npmmirror.com/dequal/-/dequal-2.0.3.tgz"
@@ -4356,7 +4314,7 @@
   resolved "https://registry.npmjs.org/detect-file/-/detect-file-1.0.0.tgz"
   integrity sha512-DtCOLG98P007x7wiiOmfI0fi3eIKyWiLTGJ2MDnVi/E04lWGbf+JzrRHMm0rgIIZJGtHpKpbVgLWHrv8xXpc3Q==
 
-detect-indent@^6.0.0, detect-indent@6.1.0:
+detect-indent@6.1.0, detect-indent@^6.0.0:
   version "6.1.0"
   resolved "https://registry.npmjs.org/detect-indent/-/detect-indent-6.1.0.tgz"
   integrity sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==
@@ -5099,6 +5057,16 @@
   resolved "https://registry.npmmirror.com/fresh/-/fresh-0.5.2.tgz"
   integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==
 
+fs-extra@9.1.0, fs-extra@^9.0.0:
+  version "9.1.0"
+  resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz"
+  integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==
+  dependencies:
+    at-least-node "^1.0.0"
+    graceful-fs "^4.2.0"
+    jsonfile "^6.0.1"
+    universalify "^2.0.0"
+
 fs-extra@^11.1.1, fs-extra@^11.2.0:
   version "11.3.0"
   resolved "https://registry.npmmirror.com/fs-extra/-/fs-extra-11.3.0.tgz"
@@ -5108,26 +5076,6 @@
     jsonfile "^6.0.1"
     universalify "^2.0.0"
 
-fs-extra@^9.0.0:
-  version "9.1.0"
-  resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz"
-  integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==
-  dependencies:
-    at-least-node "^1.0.0"
-    graceful-fs "^4.2.0"
-    jsonfile "^6.0.1"
-    universalify "^2.0.0"
-
-fs-extra@9.1.0:
-  version "9.1.0"
-  resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz"
-  integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==
-  dependencies:
-    at-least-node "^1.0.0"
-    graceful-fs "^4.2.0"
-    jsonfile "^6.0.1"
-    universalify "^2.0.0"
-
 fs-monkey@^1.0.3:
   version "1.0.3"
   resolved "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.3.tgz"
@@ -5260,7 +5208,7 @@
   resolved "https://registry.npmmirror.com/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz"
   integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==
 
-glob@^7.0.0, glob@^7.1.3, glob@^7.1.6, glob@7.2.3:
+glob@7.2.3, glob@^7.0.0, glob@^7.1.3, glob@^7.1.6:
   version "7.2.3"
   resolved "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz"
   integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==
@@ -5372,7 +5320,7 @@
     p-cancelable "^3.0.0"
     responselike "^3.0.0"
 
-graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9, graceful-fs@4.2.10:
+graceful-fs@4.2.10, graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9:
   version "4.2.10"
   resolved "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz"
   integrity sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==
@@ -5716,16 +5664,6 @@
   resolved "https://registry.npmmirror.com/http-deceiver/-/http-deceiver-1.2.7.tgz"
   integrity sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==
 
-http-errors@~1.6.2:
-  version "1.6.3"
-  resolved "https://registry.npmmirror.com/http-errors/-/http-errors-1.6.3.tgz"
-  integrity sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==
-  dependencies:
-    depd "~1.1.2"
-    inherits "2.0.3"
-    setprototypeof "1.1.0"
-    statuses ">= 1.4.0 < 2"
-
 http-errors@2.0.0:
   version "2.0.0"
   resolved "https://registry.npmmirror.com/http-errors/-/http-errors-2.0.0.tgz"
@@ -5737,6 +5675,16 @@
     statuses "2.0.1"
     toidentifier "1.0.1"
 
+http-errors@~1.6.2:
+  version "1.6.3"
+  resolved "https://registry.npmmirror.com/http-errors/-/http-errors-1.6.3.tgz"
+  integrity sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==
+  dependencies:
+    depd "~1.1.2"
+    inherits "2.0.3"
+    setprototypeof "1.1.0"
+    statuses ">= 1.4.0 < 2"
+
 http-parser-js@>=0.5.1:
   version "0.5.9"
   resolved "https://registry.npmmirror.com/http-parser-js/-/http-parser-js-0.5.9.tgz"
@@ -5775,7 +5723,7 @@
   resolved "https://registry.npmmirror.com/human-signals/-/human-signals-2.1.0.tgz"
   integrity sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==
 
-iconv-lite@^0.4.24, iconv-lite@0.4.24:
+iconv-lite@0.4.24, iconv-lite@^0.4.24:
   version "0.4.24"
   resolved "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz"
   integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==
@@ -5857,7 +5805,7 @@
     once "^1.3.0"
     wrappy "1"
 
-inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.3, inherits@2, inherits@2.0.4:
+inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.3:
   version "2.0.4"
   resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz"
   integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
@@ -5867,16 +5815,16 @@
   resolved "https://registry.npmmirror.com/inherits/-/inherits-2.0.3.tgz"
   integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==
 
-ini@^1.3.2, ini@^1.3.4, ini@^1.3.5, ini@~1.3.0:
-  version "1.3.8"
-  resolved "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz"
-  integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==
-
 ini@2.0.0:
   version "2.0.0"
   resolved "https://registry.npmmirror.com/ini/-/ini-2.0.0.tgz"
   integrity sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==
 
+ini@^1.3.2, ini@^1.3.4, ini@^1.3.5, ini@~1.3.0:
+  version "1.3.8"
+  resolved "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz"
+  integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==
+
 inline-style-parser@0.2.4:
   version "0.2.4"
   resolved "https://registry.npmmirror.com/inline-style-parser/-/inline-style-parser-0.2.4.tgz"
@@ -5915,16 +5863,16 @@
   dependencies:
     loose-envify "^1.0.0"
 
-ipaddr.js@^2.0.1:
-  version "2.2.0"
-  resolved "https://registry.npmmirror.com/ipaddr.js/-/ipaddr.js-2.2.0.tgz"
-  integrity sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==
-
 ipaddr.js@1.9.1:
   version "1.9.1"
   resolved "https://registry.npmmirror.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz"
   integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==
 
+ipaddr.js@^2.0.1:
+  version "2.2.0"
+  resolved "https://registry.npmmirror.com/ipaddr.js/-/ipaddr.js-2.2.0.tgz"
+  integrity sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==
+
 is-alphabetical@^2.0.0:
   version "2.0.1"
   resolved "https://registry.npmmirror.com/is-alphabetical/-/is-alphabetical-2.0.1.tgz"
@@ -6125,16 +6073,16 @@
   resolved "https://registry.npmmirror.com/is-yarn-global/-/is-yarn-global-0.4.1.tgz"
   integrity sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==
 
-isarray@~1.0.0:
-  version "1.0.0"
-  resolved "https://registry.npmmirror.com/isarray/-/isarray-1.0.0.tgz"
-  integrity sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==
-
 isarray@0.0.1:
   version "0.0.1"
   resolved "https://registry.npmmirror.com/isarray/-/isarray-0.0.1.tgz"
   integrity sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==
 
+isarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.npmmirror.com/isarray/-/isarray-1.0.0.tgz"
+  integrity sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==
+
 isexe@^2.0.0:
   version "2.0.0"
   resolved "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz"
@@ -6271,14 +6219,6 @@
   resolved "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz"
   integrity sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==
 
-JSONStream@^1.0.4:
-  version "1.3.5"
-  resolved "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz"
-  integrity sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==
-  dependencies:
-    jsonparse "^1.2.0"
-    through ">=2.2.7 <3"
-
 keyv@^4.5.3:
   version "4.5.4"
   resolved "https://registry.npmmirror.com/keyv/-/keyv-4.5.4.tgz"
@@ -6439,7 +6379,7 @@
   resolved "https://registry.npmmirror.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz"
   integrity sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==
 
-lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21, lodash@4.17.21:
+lodash@4.17.21, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21:
   version "4.17.21"
   resolved "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz"
   integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==
@@ -6462,7 +6402,7 @@
   resolved "https://registry.npmjs.org/longest/-/longest-2.0.1.tgz"
   integrity sha512-Ajzxb8CM6WAnFjgiloPsI3bF+WCxcvhdIG3KNA2KN962+tdBsHcuQ4k4qX/EcS/2CRkcc0iAkR956Nib6aXU/Q==
 
-loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0, loose-envify@^1.3.1, loose-envify@^1.4.0:
+loose-envify@^1.0.0, loose-envify@^1.2.0, loose-envify@^1.3.1, loose-envify@^1.4.0:
   version "1.4.0"
   resolved "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz"
   integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==
@@ -6807,16 +6747,16 @@
   resolved "https://registry.npmmirror.com/merge-stream/-/merge-stream-2.0.0.tgz"
   integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==
 
-merge@^2.1.1:
-  version "2.1.1"
-  resolved "https://registry.npmjs.org/merge/-/merge-2.1.1.tgz"
-  integrity sha512-jz+Cfrg9GWOZbQAnDQ4hlVnQky+341Yk5ru8bZSe6sIDTCIg8n9i/u7hSQGSVOF3C7lH6mGtqjkiT9G4wFLL0w==
-
 merge2@^1.3.0, merge2@^1.4.1:
   version "1.4.1"
   resolved "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz"
   integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==
 
+merge@^2.1.1:
+  version "2.1.1"
+  resolved "https://registry.npmjs.org/merge/-/merge-2.1.1.tgz"
+  integrity sha512-jz+Cfrg9GWOZbQAnDQ4hlVnQky+341Yk5ru8bZSe6sIDTCIg8n9i/u7hSQGSVOF3C7lH6mGtqjkiT9G4wFLL0w==
+
 methods@~1.1.2:
   version "1.1.2"
   resolved "https://registry.npmmirror.com/methods/-/methods-1.1.2.tgz"
@@ -7248,6 +7188,11 @@
     braces "^3.0.3"
     picomatch "^2.3.1"
 
+mime-db@1.52.0:
+  version "1.52.0"
+  resolved "https://registry.npmmirror.com/mime-db/-/mime-db-1.52.0.tgz"
+  integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
+
 "mime-db@>= 1.43.0 < 2":
   version "1.53.0"
   resolved "https://registry.npmmirror.com/mime-db/-/mime-db-1.53.0.tgz"
@@ -7258,47 +7203,21 @@
   resolved "https://registry.npmmirror.com/mime-db/-/mime-db-1.33.0.tgz"
   integrity sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==
 
-mime-db@1.52.0:
-  version "1.52.0"
-  resolved "https://registry.npmmirror.com/mime-db/-/mime-db-1.52.0.tgz"
-  integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
-
-mime-types@^2.1.27:
-  version "2.1.35"
-  resolved "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz"
-  integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
-  dependencies:
-    mime-db "1.52.0"
-
-mime-types@^2.1.31:
-  version "2.1.35"
-  resolved "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz"
-  integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
-  dependencies:
-    mime-db "1.52.0"
-
-mime-types@~2.1.17, mime-types@2.1.18:
+mime-types@2.1.18, mime-types@~2.1.17:
   version "2.1.18"
   resolved "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.18.tgz"
   integrity sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==
   dependencies:
     mime-db "~1.33.0"
 
-mime-types@~2.1.24:
+mime-types@^2.1.27, mime-types@^2.1.31, mime-types@~2.1.24, mime-types@~2.1.34:
   version "2.1.35"
   resolved "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz"
   integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
   dependencies:
     mime-db "1.52.0"
 
-mime-types@~2.1.34:
-  version "2.1.35"
-  resolved "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz"
-  integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
-  dependencies:
-    mime-db "1.52.0"
-
-mime@^1.4.1, mime@1.6.0:
+mime@1.6.0, mime@^1.4.1:
   version "1.6.0"
   resolved "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz"
   integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==
@@ -7336,13 +7255,6 @@
   resolved "https://registry.npmmirror.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz"
   integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==
 
-minimatch@^3.0.4, minimatch@^3.1.1, minimatch@3.1.2:
-  version "3.1.2"
-  resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz"
-  integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==
-  dependencies:
-    brace-expansion "^1.1.7"
-
 minimatch@3.0.4:
   version "3.0.4"
   resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz"
@@ -7350,6 +7262,13 @@
   dependencies:
     brace-expansion "^1.1.7"
 
+minimatch@3.1.2, minimatch@^3.0.4, minimatch@^3.1.1:
+  version "3.1.2"
+  resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz"
+  integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==
+  dependencies:
+    brace-expansion "^1.1.7"
+
 minimist-options@4.1.0:
   version "4.1.0"
   resolved "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz"
@@ -7359,7 +7278,7 @@
     is-plain-obj "^1.1.0"
     kind-of "^6.0.3"
 
-minimist@^1.2.0, minimist@^1.2.5, minimist@1.2.6:
+minimist@1.2.6, minimist@^1.2.0, minimist@^1.2.5:
   version "1.2.6"
   resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz"
   integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==
@@ -7374,11 +7293,6 @@
   resolved "https://registry.npmmirror.com/mrmime/-/mrmime-2.0.1.tgz"
   integrity sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ==
 
-ms@^2.1.1, ms@^2.1.3, ms@2.1.3:
-  version "2.1.3"
-  resolved "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz"
-  integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==
-
 ms@2.0.0:
   version "2.0.0"
   resolved "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz"
@@ -7389,6 +7303,11 @@
   resolved "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz"
   integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
 
+ms@2.1.3, ms@^2.1.1, ms@^2.1.3:
+  version "2.1.3"
+  resolved "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz"
+  integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==
+
 multicast-dns@^7.2.5:
   version "7.2.5"
   resolved "https://registry.npmmirror.com/multicast-dns/-/multicast-dns-7.2.5.tgz"
@@ -7416,16 +7335,16 @@
     iconv-lite "^0.6.3"
     sax "^1.2.4"
 
-negotiator@~0.6.4:
-  version "0.6.4"
-  resolved "https://registry.npmmirror.com/negotiator/-/negotiator-0.6.4.tgz"
-  integrity sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==
-
 negotiator@0.6.3:
   version "0.6.3"
   resolved "https://registry.npmmirror.com/negotiator/-/negotiator-0.6.3.tgz"
   integrity sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==
 
+negotiator@~0.6.4:
+  version "0.6.4"
+  resolved "https://registry.npmmirror.com/negotiator/-/negotiator-0.6.4.tgz"
+  integrity sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==
+
 neo-async@^2.6.0, neo-async@^2.6.2:
   version "2.6.2"
   resolved "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz"
@@ -7459,17 +7378,7 @@
   resolved "https://registry.npmmirror.com/node-releases/-/node-releases-2.0.19.tgz"
   integrity sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==
 
-normalize-package-data@^2.3.2:
-  version "2.5.0"
-  resolved "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz"
-  integrity sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==
-  dependencies:
-    hosted-git-info "^2.1.4"
-    resolve "^1.10.0"
-    semver "2 || 3 || 4 || 5"
-    validate-npm-package-license "^3.0.1"
-
-normalize-package-data@^2.5.0:
+normalize-package-data@^2.3.2, normalize-package-data@^2.5.0:
   version "2.5.0"
   resolved "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz"
   integrity sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==
@@ -7850,13 +7759,6 @@
   resolved "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz"
   integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
 
-path-to-regexp@^1.7.0:
-  version "1.9.0"
-  resolved "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-1.9.0.tgz"
-  integrity sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==
-  dependencies:
-    isarray "0.0.1"
-
 path-to-regexp@0.1.12:
   version "0.1.12"
   resolved "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-0.1.12.tgz"
@@ -7867,6 +7769,13 @@
   resolved "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-3.3.0.tgz"
   integrity sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw==
 
+path-to-regexp@^1.7.0:
+  version "1.9.0"
+  resolved "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-1.9.0.tgz"
+  integrity sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==
+  dependencies:
+    isarray "0.0.1"
+
 path-type@^3.0.0:
   version "3.0.0"
   resolved "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz"
@@ -8603,21 +8512,16 @@
   dependencies:
     safe-buffer "^5.1.0"
 
-range-parser@^1.2.1:
-  version "1.2.1"
-  resolved "https://registry.npmmirror.com/range-parser/-/range-parser-1.2.1.tgz"
-  integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==
-
-range-parser@~1.2.1:
-  version "1.2.1"
-  resolved "https://registry.npmmirror.com/range-parser/-/range-parser-1.2.1.tgz"
-  integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==
-
 range-parser@1.2.0:
   version "1.2.0"
   resolved "https://registry.npmmirror.com/range-parser/-/range-parser-1.2.0.tgz"
   integrity sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==
 
+range-parser@^1.2.1, range-parser@~1.2.1:
+  version "1.2.1"
+  resolved "https://registry.npmmirror.com/range-parser/-/range-parser-1.2.1.tgz"
+  integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==
+
 raw-body@2.5.2:
   version "2.5.2"
   resolved "https://registry.npmmirror.com/raw-body/-/raw-body-2.5.2.tgz"
@@ -8668,13 +8572,12 @@
     strip-ansi "^6.0.1"
     text-table "^0.2.0"
 
-react-dom@^18.2.0:
-  version "18.3.1"
-  resolved "https://registry.npmmirror.com/react-dom/-/react-dom-18.3.1.tgz"
-  integrity sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==
+react-dom@^19.0.0:
+  version "19.0.0"
+  resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-19.0.0.tgz#43446f1f01c65a4cd7f7588083e686a6726cfb57"
+  integrity sha512-4GV5sHFG0e/0AD4X+ySy6UJd3jVl1iNsNHdpad0qhABJ11twS3TTBnseqsKurKcsNqCEFeGL3uLpVChpIO3QfQ==
   dependencies:
-    loose-envify "^1.1.0"
-    scheduler "^0.23.2"
+    scheduler "^0.25.0"
 
 react-error-overlay@^6.0.11:
   version "6.0.11"
@@ -8748,7 +8651,7 @@
     tiny-invariant "^1.0.2"
     tiny-warning "^1.0.0"
 
-react-router@^5.3.4, react-router@5.3.4:
+react-router@5.3.4, react-router@^5.3.4:
   version "5.3.4"
   resolved "https://registry.npmmirror.com/react-router/-/react-router-5.3.4.tgz"
   integrity sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==
@@ -8763,12 +8666,10 @@
     tiny-invariant "^1.0.2"
     tiny-warning "^1.0.0"
 
-react@^18.2.0:
-  version "18.3.1"
-  resolved "https://registry.npmmirror.com/react/-/react-18.3.1.tgz"
-  integrity sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==
-  dependencies:
-    loose-envify "^1.1.0"
+react@^19.0.0:
+  version "19.0.0"
+  resolved "https://registry.yarnpkg.com/react/-/react-19.0.0.tgz#6e1969251b9f108870aa4bff37a0ce9ddfaaabdd"
+  integrity sha512-V8AVnmPIICiWpGfm6GLzCR/W5FXLchHop40W4nXBmdlEceh16rCN8O8LNWm5bh5XUX91fh7KpA+W0TgMKmgTpQ==
 
 read-pkg-up@^3.0.0:
   version "3.0.0"
@@ -8806,6 +8707,15 @@
     parse-json "^5.0.0"
     type-fest "^0.6.0"
 
+readable-stream@3, readable-stream@^3.0.0, readable-stream@^3.0.2, readable-stream@^3.0.6, readable-stream@^3.4.0:
+  version "3.6.0"
+  resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz"
+  integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==
+  dependencies:
+    inherits "^2.0.3"
+    string_decoder "^1.1.1"
+    util-deprecate "^1.0.1"
+
 readable-stream@^2.0.1:
   version "2.3.8"
   resolved "https://registry.npmmirror.com/readable-stream/-/readable-stream-2.3.8.tgz"
@@ -8819,15 +8729,6 @@
     string_decoder "~1.1.1"
     util-deprecate "~1.0.1"
 
-readable-stream@^3.0.0, readable-stream@^3.0.2, readable-stream@^3.0.6, readable-stream@^3.4.0, readable-stream@3:
-  version "3.6.0"
-  resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz"
-  integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==
-  dependencies:
-    inherits "^2.0.3"
-    string_decoder "^1.1.1"
-    util-deprecate "^1.0.1"
-
 readable-stream@~2.3.6:
   version "2.3.7"
   resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz"
@@ -9222,7 +9123,7 @@
   dependencies:
     tslib "^2.1.0"
 
-safe-buffer@^5.1.0, safe-buffer@>=5.1.0, safe-buffer@~5.2.0, safe-buffer@5.2.1:
+safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.1.0, safe-buffer@~5.2.0:
   version "5.2.1"
   resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz"
   integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
@@ -9242,12 +9143,19 @@
   resolved "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz"
   integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==
 
-scheduler@^0.23.2:
-  version "0.23.2"
-  resolved "https://registry.npmmirror.com/scheduler/-/scheduler-0.23.2.tgz"
-  integrity sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==
+scheduler@^0.25.0:
+  version "0.25.0"
+  resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.25.0.tgz#336cd9768e8cceebf52d3c80e3dcf5de23e7e015"
+  integrity sha512-xFVuu11jh+xcO7JOAGJNOXld8/TcEHK/4CituBUeUb5hqxJLj9YuemAEuvm9gQ/+pgXYfbQuqAkiYu+u7YEsNA==
+
+schema-utils@2.7.0:
+  version "2.7.0"
+  resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz"
+  integrity sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==
   dependencies:
-    loose-envify "^1.1.0"
+    "@types/json-schema" "^7.0.4"
+    ajv "^6.12.2"
+    ajv-keywords "^3.4.1"
 
 schema-utils@^3.0.0:
   version "3.3.0"
@@ -9268,15 +9176,6 @@
     ajv-formats "^2.1.1"
     ajv-keywords "^5.1.0"
 
-schema-utils@2.7.0:
-  version "2.7.0"
-  resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz"
-  integrity sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==
-  dependencies:
-    "@types/json-schema" "^7.0.4"
-    ajv "^6.12.2"
-    ajv-keywords "^3.4.1"
-
 section-matter@^1.0.0:
   version "1.0.0"
   resolved "https://registry.npmmirror.com/section-matter/-/section-matter-1.0.0.tgz"
@@ -9305,7 +9204,7 @@
   dependencies:
     semver "^7.3.5"
 
-semver@^5.6.0:
+"semver@2 || 3 || 4 || 5", semver@^5.6.0:
   version "5.7.1"
   resolved "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz"
   integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==
@@ -9320,47 +9219,18 @@
   resolved "https://registry.npmmirror.com/semver/-/semver-6.3.1.tgz"
   integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==
 
-semver@^7.1.1:
+semver@^7.1.1, semver@^7.3.2, semver@^7.3.4:
   version "7.3.7"
   resolved "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz"
   integrity sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==
   dependencies:
     lru-cache "^6.0.0"
 
-semver@^7.3.2:
-  version "7.3.7"
-  resolved "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz"
-  integrity sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==
-  dependencies:
-    lru-cache "^6.0.0"
-
-semver@^7.3.4:
-  version "7.3.7"
-  resolved "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz"
-  integrity sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==
-  dependencies:
-    lru-cache "^6.0.0"
-
-semver@^7.3.5:
+semver@^7.3.5, semver@^7.3.7, semver@^7.5.4:
   version "7.7.1"
   resolved "https://registry.npmmirror.com/semver/-/semver-7.7.1.tgz"
   integrity sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==
 
-semver@^7.3.7:
-  version "7.7.1"
-  resolved "https://registry.npmmirror.com/semver/-/semver-7.7.1.tgz"
-  integrity sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==
-
-semver@^7.5.4:
-  version "7.7.1"
-  resolved "https://registry.npmmirror.com/semver/-/semver-7.7.1.tgz"
-  integrity sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==
-
-"semver@2 || 3 || 4 || 5":
-  version "5.7.1"
-  resolved "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz"
-  integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==
-
 send@0.19.0:
   version "0.19.0"
   resolved "https://registry.npmmirror.com/send/-/send-0.19.0.tgz"
@@ -9668,13 +9538,6 @@
     select-hose "^2.0.0"
     spdy-transport "^3.0.0"
 
-split@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.npmjs.org/split/-/split-1.0.1.tgz"
-  integrity sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==
-  dependencies:
-    through "2"
-
 split2@^3.0.0:
   version "3.2.2"
   resolved "https://registry.npmjs.org/split2/-/split2-3.2.2.tgz"
@@ -9682,6 +9545,13 @@
   dependencies:
     readable-stream "^3.0.0"
 
+split@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.npmjs.org/split/-/split-1.0.1.tgz"
+  integrity sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==
+  dependencies:
+    through "2"
+
 sprintf-js@~1.0.2:
   version "1.0.3"
   resolved "https://registry.npmmirror.com/sprintf-js/-/sprintf-js-1.0.3.tgz"
@@ -9712,35 +9582,21 @@
     stringify-package "^1.0.1"
     yargs "^16.0.0"
 
-"statuses@>= 1.4.0 < 2":
-  version "1.5.0"
-  resolved "https://registry.npmmirror.com/statuses/-/statuses-1.5.0.tgz"
-  integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==
-
 statuses@2.0.1:
   version "2.0.1"
   resolved "https://registry.npmmirror.com/statuses/-/statuses-2.0.1.tgz"
   integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==
 
+"statuses@>= 1.4.0 < 2":
+  version "1.5.0"
+  resolved "https://registry.npmmirror.com/statuses/-/statuses-1.5.0.tgz"
+  integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==
+
 std-env@^3.7.0:
   version "3.8.1"
   resolved "https://registry.npmmirror.com/std-env/-/std-env-3.8.1.tgz"
   integrity sha512-vj5lIj3Mwf9D79hBkltk5qmkFI+biIKWS2IBxEyEU3AX1tUf7AoL8nSazCOiiqQsGKIq01SClsKEzweu34uwvA==
 
-string_decoder@^1.1.1:
-  version "1.3.0"
-  resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz"
-  integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==
-  dependencies:
-    safe-buffer "~5.2.0"
-
-string_decoder@~1.1.1:
-  version "1.1.1"
-  resolved "https://registry.npmmirror.com/string_decoder/-/string_decoder-1.1.1.tgz"
-  integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
-  dependencies:
-    safe-buffer "~5.1.0"
-
 string-width@^4.1.0, string-width@^4.2.0:
   version "4.2.3"
   resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz"
@@ -9759,6 +9615,20 @@
     emoji-regex "^9.2.2"
     strip-ansi "^7.0.1"
 
+string_decoder@^1.1.1:
+  version "1.3.0"
+  resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz"
+  integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==
+  dependencies:
+    safe-buffer "~5.2.0"
+
+string_decoder@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.npmmirror.com/string_decoder/-/string_decoder-1.1.1.tgz"
+  integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
+  dependencies:
+    safe-buffer "~5.1.0"
+
 stringify-entities@^4.0.0:
   version "4.0.4"
   resolved "https://registry.npmmirror.com/stringify-entities/-/stringify-entities-4.0.4.tgz"
@@ -9800,16 +9670,16 @@
   resolved "https://registry.npmmirror.com/strip-bom-string/-/strip-bom-string-1.0.0.tgz"
   integrity sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==
 
-strip-bom@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz"
-  integrity sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==
-
 strip-bom@4.0.0:
   version "4.0.0"
   resolved "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz"
   integrity sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==
 
+strip-bom@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz"
+  integrity sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==
+
 strip-final-newline@^2.0.0:
   version "2.0.0"
   resolved "https://registry.npmmirror.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz"
@@ -9822,7 +9692,7 @@
   dependencies:
     min-indent "^1.0.0"
 
-strip-json-comments@^3.1.1, strip-json-comments@3.1.1:
+strip-json-comments@3.1.1, strip-json-comments@^3.1.1:
   version "3.1.1"
   resolved "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz"
   integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==
@@ -9939,11 +9809,6 @@
   resolved "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz"
   integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==
 
-through@^2.3.6, "through@>=2.2.7 <3", through@2:
-  version "2.3.8"
-  resolved "https://registry.npmjs.org/through/-/through-2.3.8.tgz"
-  integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==
-
 through2@^2.0.0:
   version "2.0.5"
   resolved "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz"
@@ -9959,6 +9824,11 @@
   dependencies:
     readable-stream "3"
 
+through@2, "through@>=2.2.7 <3", through@^2.3.6:
+  version "2.3.8"
+  resolved "https://registry.npmjs.org/through/-/through-2.3.8.tgz"
+  integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==
+
 thunky@^1.0.2:
   version "1.1.0"
   resolved "https://registry.npmmirror.com/thunky/-/thunky-1.1.0.tgz"
@@ -10438,10 +10308,10 @@
   resolved "https://registry.npmmirror.com/typescript/-/typescript-4.9.5.tgz"
   integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==
 
-typescript@^5.2.2:
-  version "5.8.2"
-  resolved "https://registry.npmmirror.com/typescript/-/typescript-5.8.2.tgz"
-  integrity sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==
+typescript@~5.6.2:
+  version "5.6.3"
+  resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.6.3.tgz#5f3449e31c9d94febb17de03cc081dd56d81db5b"
+  integrity sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==
 
 uglify-js@^3.1.4:
   version "3.17.0"
@@ -10551,7 +10421,7 @@
   resolved "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz"
   integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==
 
-unpipe@~1.0.0, unpipe@1.0.0:
+unpipe@1.0.0, unpipe@~1.0.0:
   version "1.0.0"
   resolved "https://registry.npmmirror.com/unpipe/-/unpipe-1.0.0.tgz"
   integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==
@@ -10830,7 +10700,7 @@
     std-env "^3.7.0"
     wrap-ansi "^7.0.0"
 
-websocket-driver@^0.7.4, websocket-driver@>=0.5.1:
+websocket-driver@>=0.5.1, websocket-driver@^0.7.4:
   version "0.7.4"
   resolved "https://registry.npmmirror.com/websocket-driver/-/websocket-driver-0.7.4.tgz"
   integrity sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==