Backport from HEAD:

* include/apr_rmm.h: Document alignment requirement for apr_rmm_init,
and hence the alignment guarantee which apr_rmm_addr_get can offer.

* misc/apr_rmm.c: Ensure allocated blocks use default address
alignment by using aligned structure sizes throughout.

* misc/apr_rmm.c (find_block_of_size): Remove redundant conditional.
(apr_rmm_init, apr_rmm_destroy, apr_rmm_free): Catch some _UNLOCK
return values.

* misc/apr_rmm.c (apr_rmm_overhead_get): Account for worst case
alignment overhead too.

* test/testrmm.c (test_rmm): Check for address alignment.

PR: 29873


git-svn-id: https://svn.apache.org/repos/asf/apr/apr-util/branches/APU_0_9_BRANCH@59137 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/CHANGES b/CHANGES
index fcbaeef..e9f7ffd 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,5 +1,8 @@
 Changes with APR-util 0.9.5
 
+  *) Guarantee and require default address alignment for block offsets
+     within segments in the apr_rmm interface.  PR 29873.  [Joe Orton]
+
   *) SECURITY: CAN-2004-0786 (cve.mitre.org)
      Fix input validation in apr_uri_parse() to avoid passing negative
      length to memcpy for malformed IPv6 literal addresses.
diff --git a/include/apr_rmm.h b/include/apr_rmm.h
index 6c0f9d8..b99471f 100644
--- a/include/apr_rmm.h
+++ b/include/apr_rmm.h
@@ -48,6 +48,8 @@
  * @param membuf The block of relocateable memory to be managed
  * @param memsize The size of relocateable memory block to be managed
  * @param cont The pool to use for local storage and management
+ * @remark Both @param membuf and @param memsize must be aligned
+ * (for instance using APR_ALIGN_DEFAULT).
  */
 APU_DECLARE(apr_status_t) apr_rmm_init(apr_rmm_t **rmm, apr_anylock_t *lock,
                                        void* membuf, apr_size_t memsize, 
@@ -108,6 +110,7 @@
  * Retrieve the physical address of a relocatable allocation of memory
  * @param rmm The relocatable memory block
  * @param entity The memory allocation to free
+ * @return address The address, aligned with APR_ALIGN_DEFAULT.
  */
 APU_DECLARE(void *) apr_rmm_addr_get(apr_rmm_t *rmm, apr_rmm_off_t entity);
 
diff --git a/misc/apr_rmm.c b/misc/apr_rmm.c
index a4accb0..0d4f020 100644
--- a/misc/apr_rmm.c
+++ b/misc/apr_rmm.c
@@ -33,6 +33,9 @@
     apr_rmm_off_t /* rmm_block_t */ firstfree;
 } rmm_hdr_block_t;
 
+#define RMM_HDR_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_hdr_block_t)))
+#define RMM_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_block_t)))
+
 struct apr_rmm_t {
     apr_pool_t *p;
     rmm_hdr_block_t *base;
@@ -87,7 +90,7 @@
         next = blk->next;
     }
 
-    if (bestsize > sizeof(rmm_block_t) + size) {
+    if (bestsize > RMM_BLOCK_SIZE + size) {
         struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + best);
         struct rmm_block_t *new = (rmm_block_t*)((char*)rmm->base + best + size);
 
@@ -104,11 +107,7 @@
         }
     }
 
-    if (best) {
-        return best;
-    }
-     
-    return 0;
+    return best;
 }
 
 static void move_block(apr_rmm_t *rmm, apr_rmm_off_t this, int free)
@@ -205,7 +204,7 @@
 
     (*rmm)->base->abssize = size;
     (*rmm)->base->firstused = 0;
-    (*rmm)->base->firstfree = sizeof(rmm_hdr_block_t);
+    (*rmm)->base->firstfree = RMM_HDR_BLOCK_SIZE;
 
     blk = (rmm_block_t *)((char*)base + (*rmm)->base->firstfree);
 
@@ -213,8 +212,7 @@
     blk->prev = 0;
     blk->next = 0;
 
-    APR_ANYLOCK_UNLOCK(lock);
-    return APR_SUCCESS;
+    return APR_ANYLOCK_UNLOCK(lock);
 }
 
 APU_DECLARE(apr_status_t) apr_rmm_destroy(apr_rmm_t *rmm)
@@ -247,8 +245,7 @@
     rmm->base->abssize = 0;
     rmm->size = 0;
 
-    APR_ANYLOCK_UNLOCK(&rmm->lock);
-    return APR_SUCCESS;
+    return APR_ANYLOCK_UNLOCK(&rmm->lock);
 }
 
 APU_DECLARE(apr_status_t) apr_rmm_attach(apr_rmm_t **rmm, apr_anylock_t *lock,
@@ -281,15 +278,15 @@
 {
     apr_rmm_off_t this;
     
-    reqsize = APR_ALIGN_DEFAULT(reqsize);
+    reqsize = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
 
     APR_ANYLOCK_LOCK(&rmm->lock);
 
-    this = find_block_of_size(rmm, reqsize + sizeof(rmm_block_t));
+    this = find_block_of_size(rmm, reqsize);
 
     if (this) {
         move_block(rmm, this, 0);
-        this += sizeof(rmm_block_t);
+        this += RMM_BLOCK_SIZE;
     }
 
     APR_ANYLOCK_UNLOCK(&rmm->lock);
@@ -300,16 +297,16 @@
 {
     apr_rmm_off_t this;
         
-    reqsize = APR_ALIGN_DEFAULT(reqsize);
+    reqsize = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
 
     APR_ANYLOCK_LOCK(&rmm->lock);
 
-    this = find_block_of_size(rmm, reqsize + sizeof(rmm_block_t));
+    this = find_block_of_size(rmm, reqsize);
 
     if (this) {
         move_block(rmm, this, 0);
-        this += sizeof(rmm_block_t);
-        memset((char*)rmm->base + this, 0, reqsize);
+        this += RMM_BLOCK_SIZE;
+        memset((char*)rmm->base + this, 0, reqsize - RMM_BLOCK_SIZE);
     }
 
     APR_ANYLOCK_UNLOCK(&rmm->lock);
@@ -332,7 +329,7 @@
     old = apr_rmm_offset_get(rmm, entity);
 
     if ((this = apr_rmm_malloc(rmm, reqsize)) == 0) {
-        return this;
+        return 0;
     }
 
     blk = (rmm_block_t*)((char*)rmm->base + old);
@@ -353,11 +350,11 @@
     /* A little sanity check is always healthy, especially here.
      * If we really cared, we could make this compile-time
      */
-    if (this < sizeof(rmm_hdr_block_t) + sizeof(rmm_block_t)) {
+    if (this < RMM_HDR_BLOCK_SIZE + RMM_BLOCK_SIZE) {
         return APR_EINVAL;
     }
 
-    this -= sizeof(rmm_block_t);
+    this -= RMM_BLOCK_SIZE;
 
     blk = (rmm_block_t*)((char*)rmm->base + this);
 
@@ -390,8 +387,7 @@
      */
     move_block(rmm, this, 1);
     
-    APR_ANYLOCK_UNLOCK(&rmm->lock);
-    return APR_SUCCESS;
+    return APR_ANYLOCK_UNLOCK(&rmm->lock);
 }
 
 APU_DECLARE(void *) apr_rmm_addr_get(apr_rmm_t *rmm, apr_rmm_off_t entity) 
@@ -413,5 +409,8 @@
 
 APU_DECLARE(apr_size_t) apr_rmm_overhead_get(int n) 
 {
-    return sizeof(rmm_hdr_block_t) + n * sizeof(rmm_block_t);
+    /* overhead per block is at most APR_ALIGN_DEFAULT(1) wasted bytes
+     * for alignment overhead, plus the size of the rmm_block_t
+     * structure. */
+    return RMM_HDR_BLOCK_SIZE + n * (RMM_BLOCK_SIZE + APR_ALIGN_DEFAULT(1));
 }
diff --git a/test/testrmm.c b/test/testrmm.c
index c4dc989..8203be1 100644
--- a/test/testrmm.c
+++ b/test/testrmm.c
@@ -51,7 +51,7 @@
     }
 
     /* We're going to want 10 blocks of data from our target rmm. */
-    size = SHARED_SIZE + apr_rmm_overhead_get(FRAG_COUNT);
+    size = SHARED_SIZE + apr_rmm_overhead_get(FRAG_COUNT + 1);
     printf("Creating anonymous shared memory (%"
            APR_SIZE_T_FMT " bytes).....", size); 
     rv = apr_shm_create(&shm, size, NULL, pool);
@@ -87,6 +87,24 @@
     else {
         return APR_EGENERAL;  
     }
+
+    printf("Checking each fragment for address alignment.....");
+    for (i = 0; i < FRAG_COUNT; i++) {
+        char *c = apr_rmm_addr_get(rmm, off[i]);
+        apr_size_t sc = (apr_size_t)c;
+
+        if (off[i] == 0) {
+            printf("allocation failed for offset %d\n", i);
+            return APR_ENOMEM;
+        }
+
+        if (sc & 7) {
+            printf("Bad alignment for fragment %d; %p not %p!\n",
+                   i, c, (void *)APR_ALIGN_DEFAULT((apr_size_t)c));
+            return APR_EGENERAL;
+        }
+    }
+    fprintf(stdout, "OK\n");   
     
     printf("Setting each fragment to a unique value..........");
     for (i = 0; i < FRAG_COUNT; i++) {