atomic: Drop needless casts and fix the needed ones.
* atomic/os390/atomic.c(apr_atomic_xchgptr):
  Dereferencing without a cast is valid and clearer.
* atomic/unix/mutex.c(apr_atomic_casptr, apr_atomic_xchgptr):
  Dereferencing without a cast is valid and clearer.
* atomic/win32/apr_atomic.c(apr_atomic_add32, apr_atomic_sub32,
                            apr_atomic_inc32, apr_atomic_dec32,
                            apr_atomic_set32, apr_atomic_cas32,
                            apr_atomic_xchg32):
  The native 32bit Interlocked functions expect "long volatile *";
  don't cast away the volatile qualifier.
* atomic/win32/apr_atomic.c(apr_atomic_casptr):
  32bit InterlockedCompareExchangePointer() expects "long volatile *";
  don't cast to (void **).
* atomic/win32/apr_atomic.c(apr_atomic_xchgptr):
  InterlockedExchangePointer() for both 32bit and 64bit expects
  "void *volatile *"; no cast is needed.
git-svn-id: https://svn.apache.org/repos/asf/apr/apr/trunk@1902257 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/atomic/os390/atomic.c b/atomic/os390/atomic.c
index 249ee15..9442f27 100644
--- a/atomic/os390/atomic.c
+++ b/atomic/os390/atomic.c
@@ -124,7 +124,7 @@
{
void *old_ptr;
- old_ptr = *(void **)mem_ptr; /* old is automatically updated on cs failure */
+ old_ptr = *mem_ptr; /* old is automatically updated on cs failure */
#if APR_SIZEOF_VOIDP == 4
do {
} while (__cs1(&old_ptr, mem_ptr, &new_ptr));
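
A minimal standalone sketch (not APR code) of the point above: once "mem"
is declared "void *volatile *", a plain dereference already yields a
"void *", so the old "*(void **)" cast bought nothing and only stripped
the volatile qualifier from the read.

    #include <stdio.h>

    int main(void)
    {
        int x = 42;
        void *slot = &x;
        void *volatile *mem = &slot;  /* same shape as apr_atomic_xchgptr's arg */

        void *old_ptr = *mem;         /* no cast; the access stays volatile */
        printf("%p\n", old_ptr);
        return 0;
    }
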
diff --git a/atomic/unix/mutex.c b/atomic/unix/mutex.c
index 78ad753..61919ec 100644
--- a/atomic/unix/mutex.c
+++ b/atomic/unix/mutex.c
@@ -180,7 +180,7 @@
void *prev;
DECLARE_MUTEX_LOCKED(mutex, *mem);
- prev = *(void **)mem;
+ prev = *mem;
if (prev == cmp) {
*mem = with;
}
@@ -195,7 +195,7 @@
void *prev;
DECLARE_MUTEX_LOCKED(mutex, *mem);
- prev = *(void **)mem;
+ prev = *mem;
*mem = with;
MUTEX_UNLOCK(mutex);
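
The mutex fallback serializes the whole operation, so all that is needed
inside the critical section is a plain read through the volatile-qualified
pointer. A sketch of the casptr pattern, assuming a pthread mutex in place
of APR's DECLARE_MUTEX_LOCKED/MUTEX_UNLOCK machinery (the function name is
illustrative, not APR's):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Same shape as apr_atomic_casptr(): return the previous value and
     * store "with" only if the previous value equals "cmp". */
    void *casptr_sketch(void *volatile *mem, void *with, const void *cmp)
    {
        void *prev;
        pthread_mutex_lock(&lock);
        prev = *mem;                /* plain dereference, no (void **) cast */
        if (prev == cmp) {
            *mem = with;
        }
        pthread_mutex_unlock(&lock);
        return prev;
    }
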
diff --git a/atomic/win32/apr_atomic.c b/atomic/win32/apr_atomic.c
index 75abf92..ba48589 100644
--- a/atomic/win32/apr_atomic.c
+++ b/atomic/win32/apr_atomic.c
@@ -30,7 +30,7 @@
#if (defined(_M_IA64) || defined(_M_AMD64))
return InterlockedExchangeAdd(mem, val);
#else
- return InterlockedExchangeAdd((long *)mem, val);
+ return InterlockedExchangeAdd((long volatile *)mem, val);
#endif
}
@@ -44,7 +44,7 @@
#if (defined(_M_IA64) || defined(_M_AMD64))
InterlockedExchangeAdd(mem, -val);
#else
- InterlockedExchangeAdd((long *)mem, -val);
+ InterlockedExchangeAdd((long volatile *)mem, -val);
#endif
}
@@ -54,7 +54,7 @@
#if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED)
return InterlockedIncrement(mem) - 1;
#else
- return InterlockedIncrement((long *)mem) - 1;
+ return InterlockedIncrement((long volatile *)mem) - 1;
#endif
}
@@ -63,7 +63,7 @@
#if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED)
return InterlockedDecrement(mem);
#else
- return InterlockedDecrement((long *)mem);
+ return InterlockedDecrement((long volatile *)mem);
#endif
}
@@ -72,7 +72,7 @@
#if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED)
InterlockedExchange(mem, val);
#else
- InterlockedExchange((long*)mem, val);
+ InterlockedExchange((long volatile *)mem, val);
#endif
}
@@ -87,7 +87,7 @@
#if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED)
return InterlockedCompareExchange(mem, with, cmp);
#else
- return InterlockedCompareExchange((long*)mem, with, cmp);
+ return InterlockedCompareExchange((long volatile *)mem, with, cmp);
#endif
}
@@ -96,7 +96,7 @@
#if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED)
return InterlockedCompareExchangePointer(mem, with, (void*)cmp);
#else
- return InterlockedCompareExchangePointer((void**)mem, with, (void*)cmp);
+ return InterlockedCompareExchangePointer((long volatile *)mem, with, (void*)cmp);
#endif
}
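
Background for the hunk above: on 32bit SDKs, InterlockedCompareExchangePointer()
is a macro wrapping InterlockedCompareExchange(), which is why the destination
must arrive as "long volatile *". Roughly (simplified; not the exact winbase.h
text, which also routes the value arguments through LONG_PTR):

    #define InterlockedCompareExchangePointer(dst, xchg, cmp)        \
        ((void *)InterlockedCompareExchange((long volatile *)(dst),  \
                                            (long)(xchg), (long)(cmp)))
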
@@ -105,11 +105,11 @@
#if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED)
return InterlockedExchange(mem, val);
#else
- return InterlockedExchange((long *)mem, val);
+ return InterlockedExchange((long volatile *)mem, val);
#endif
}
APR_DECLARE(void*) apr_atomic_xchgptr(void *volatile *mem, void *with)
{
- return InterlockedExchangePointer((void**)mem, with);
+ return InterlockedExchangePointer(mem, with);
}
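
Taken together, the win32 rule of thumb: a cast between pointee types
(apr_uint32_t vs. long) is unavoidable, but it must preserve the volatile
qualifier that the Interlocked prototypes declare. A minimal sketch,
assuming a 32bit "unsigned int" as a stand-in for apr_uint32_t:

    #include <windows.h>
    #include <stdio.h>

    typedef unsigned int my_uint32;   /* illustrative stand-in for apr_uint32_t */

    static my_uint32 add32(my_uint32 volatile *mem, my_uint32 val)
    {
        /* InterlockedExchangeAdd() takes "long volatile *": the
         * pointee-type cast is required, but volatile is kept. */
        return InterlockedExchangeAdd((long volatile *)mem, val);
    }

    int main(void)
    {
        my_uint32 volatile n = 1;
        my_uint32 old = add32(&n, 41);
        printf("old=%u new=%u\n", old, (unsigned)n);
        return 0;
    }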