Merge pull request #910 from andrzej-kaczmarek/cmac-tx-late-one-more-attempt
nimble/phy: Fix tx_late handling
diff --git a/porting/nimble/include/mem/mem.h b/porting/nimble/include/mem/mem.h
index a97e148..1c29efb 100644
--- a/porting/nimble/include/mem/mem.h
+++ b/porting/nimble/include/mem/mem.h
@@ -61,6 +61,8 @@
struct os_mbuf *mem_split_frag(struct os_mbuf **om, uint16_t max_frag_sz,
mem_frag_alloc_fn *alloc_cb, void *cb_arg);
+void *mem_pullup_obj(struct os_mbuf **om, uint16_t len);
+
#ifdef __cplusplus
}
#endif
diff --git a/porting/nimble/include/os/endian.h b/porting/nimble/include/os/endian.h
index e023075..021a73e 100644
--- a/porting/nimble/include/os/endian.h
+++ b/porting/nimble/include/os/endian.h
@@ -186,7 +186,7 @@
#endif
#ifndef htobe64
-#define htobe64(x) os_bswap64(x)
+#define htobe64(x) os_bswap_64(x)
#endif
#ifndef htole64
@@ -194,7 +194,7 @@
#endif
#ifndef be64toh
-#define be64toh(x) os_bswap64(x)
+#define be64toh(x) os_bswap_64(x)
#endif
#ifndef le64toh
diff --git a/porting/nimble/include/os/os_cputime.h b/porting/nimble/include/os/os_cputime.h
index 20124b5..17ad877 100644
--- a/porting/nimble/include/os/os_cputime.h
+++ b/porting/nimble/include/os/os_cputime.h
@@ -17,12 +17,12 @@
* under the License.
*/
- /**
- * @addtogroup OSKernel
- * @{
- * @defgroup OSCPUTime High Resolution Timers
- * @{
- */
+/**
+ * @addtogroup OSKernel
+ * @{
+ * @defgroup OSCPUTime High Resolution Timers
+ * @{
+ */
#ifndef H_OS_CPUTIME_
#define H_OS_CPUTIME_
@@ -33,7 +33,6 @@
#include "syscfg/syscfg.h"
#include "hal/hal_timer.h"
-#include "os/os.h"
/*
* NOTE: these definitions allow one to override the cputime frequency used.
@@ -88,9 +87,9 @@
#define CPUTIME_LT(__t1, __t2) ((int32_t) ((__t1) - (__t2)) < 0)
/** evaluates to true if t1 is after t2 in time */
#define CPUTIME_GT(__t1, __t2) ((int32_t) ((__t1) - (__t2)) > 0)
-/** evaluates to true if t1 is after t2 in time */
-#define CPUTIME_GEQ(__t1, __t2) ((int32_t) ((__t1) - (__t2)) >= 0)
/** evaluates to true if t1 is on or after t2 in time */
+#define CPUTIME_GEQ(__t1, __t2) ((int32_t) ((__t1) - (__t2)) >= 0)
+/** evaluates to true if t1 is on or before t2 in time */
#define CPUTIME_LEQ(__t1, __t2) ((int32_t) ((__t1) - (__t2)) <= 0)
/**
@@ -188,7 +187,7 @@
* @param arg Pointer to data object to pass to timer.
*/
void os_cputime_timer_init(struct hal_timer *timer, hal_timer_cb fp,
- void *arg);
+ void *arg);
/**
* Start a cputimer that will expire at 'cputime'. If cputime has already
diff --git a/porting/nimble/include/os/os_error.h b/porting/nimble/include/os/os_error.h
index 15cc622..71f8e11 100644
--- a/porting/nimble/include/os/os_error.h
+++ b/porting/nimble/include/os/os_error.h
@@ -22,6 +22,11 @@
#include "os/os.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* OS error enumerations */
enum os_error {
OS_OK = 0,
OS_ENOMEM = 1,
@@ -40,4 +45,18 @@
typedef enum os_error os_error_t;
+/**
+ * @brief Converts an OS error code (`OS_[...]`) to an equivalent system error
+ * code (`SYS_E[...]`).
+ *
+ * @param os_error The OS error code to convert.
+ *
+ * @return The equivalent system error code.
+ */
+int os_error_to_sys(os_error_t os_error);
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/porting/nimble/include/os/os_mbuf.h b/porting/nimble/include/os/os_mbuf.h
index f3857fe..771ea76 100644
--- a/porting/nimble/include/os/os_mbuf.h
+++ b/porting/nimble/include/os/os_mbuf.h
@@ -189,7 +189,7 @@
}
leadingspace = (uint16_t) (OS_MBUF_DATA(om, uint8_t *) -
- ((uint8_t *) &om->om_databuf[0] + startoff));
+ ((uint8_t *) &om->om_databuf[0] + startoff));
return (leadingspace);
}
@@ -220,7 +220,7 @@
omp = om->om_omp;
return (&om->om_databuf[0] + omp->omp_databuf_len) -
- (om->om_data + om->om_len);
+ (om->om_data + om->om_len);
}
/** @endcond */
@@ -347,7 +347,7 @@
* @return 0 on success, error code on failure.
*/
int os_mbuf_pool_init(struct os_mbuf_pool *, struct os_mempool *mp,
- uint16_t, uint16_t);
+ uint16_t, uint16_t);
/**
* Get an mbuf from the mbuf pool. The mbuf is allocated, and initialized
@@ -370,7 +370,7 @@
* @return A freshly allocated mbuf on success, NULL on failure.
*/
struct os_mbuf *os_mbuf_get_pkthdr(struct os_mbuf_pool *omp,
- uint8_t pkthdr_len);
+ uint8_t pkthdr_len);
/**
* Duplicate a chain of mbufs. Return the start of the duplicated chain.
@@ -414,6 +414,20 @@
int os_mbuf_copydata(const struct os_mbuf *m, int off, int len, void *dst);
/**
+ * @brief Calculates the length of an mbuf chain.
+ *
+ * Calculates the length of an mbuf chain. If the mbuf contains a packet
+ * header, you should use `OS_MBUF_PKTLEN()` as a more efficient alternative to
+ * this function.
+ *
+ * @param om The mbuf to measure.
+ *
+ * @return The length, in bytes, of the provided mbuf
+ * chain.
+ */
+uint16_t os_mbuf_len(const struct os_mbuf *om);
+
+/**
* Append data onto a mbuf
*
* @param om The mbuf to append the data onto
@@ -615,6 +629,22 @@
struct os_mbuf *os_mbuf_trim_front(struct os_mbuf *om);
/**
+ * Increases the length of an mbuf chain by inserting a gap at the specified
+ * offset. The contents of the gap are indeterminate. If the mbuf chain
+ * contains a packet header, its total length is increased accordingly.
+ *
+ * This function never frees the provided mbuf chain.
+ *
+ * @param om The mbuf chain to widen.
+ * @param off The offset at which to insert the gap.
+ * @param len The size of the gap to insert.
+ *
+ * @return 0 on success; OS_[...] error code on failure.
+ */
+int os_mbuf_widen(struct os_mbuf *om, uint16_t off, uint16_t len);
+
+
+/**
* Creates a single chained mbuf from m1 and m2 utilizing all
* the available buffer space in all mbufs in the resulting
* chain. In other words, ensures there is no leading space in
diff --git a/porting/nimble/include/os/os_mempool.h b/porting/nimble/include/os/os_mempool.h
index c69fb3d..71d7706 100644
--- a/porting/nimble/include/os/os_mempool.h
+++ b/porting/nimble/include/os/os_mempool.h
@@ -66,7 +66,7 @@
/** Bitmap of OS_MEMPOOL_F_[...] values. */
uint8_t mp_flags;
/** Address of memory buffer used by pool */
- uintptr_t mp_membuf_addr;
+ uint32_t mp_membuf_addr;
STAILQ_ENTRY(os_mempool) mp_list;
SLIST_HEAD(,os_memblock);
/** Name for memory block */
@@ -140,20 +140,31 @@
* when at the last memory pool.
*/
struct os_mempool *os_mempool_info_get_next(struct os_mempool *,
- struct os_mempool_info *);
+ struct os_mempool_info *);
/*
* To calculate size of the memory buffer needed for the pool. NOTE: This size
* is NOT in bytes! The size is the number of os_membuf_t elements required for
* the memory pool.
*/
-#if (OS_CFG_ALIGNMENT == OS_CFG_ALIGN_4)
-#define OS_MEMPOOL_SIZE(n,blksize) ((((blksize) + 3) / 4) * (n))
-typedef uint32_t os_membuf_t;
+#if MYNEWT_VAL(OS_MEMPOOL_GUARD)
+/*
+ * Leave extra 4 bytes of guard area at the end.
+ */
+#define OS_MEMPOOL_BLOCK_SZ(sz) ((sz) + sizeof(os_membuf_t))
#else
-#define OS_MEMPOOL_SIZE(n,blksize) ((((blksize) + 7) / 8) * (n))
-typedef uint64_t os_membuf_t;
+#define OS_MEMPOOL_BLOCK_SZ(sz) (sz)
#endif
+#if (OS_ALIGNMENT == 4)
+typedef uint32_t os_membuf_t;
+#elif (OS_ALIGNMENT == 8)
+typedef uint64_t os_membuf_t;
+#elif (OS_ALIGNMENT == 16)
+typedef __uint128_t os_membuf_t;
+#else
+#error "Unhandled `OS_ALIGNMENT` for `os_membuf_t`"
+#endif /* OS_ALIGNMENT == * */
+#define OS_MEMPOOL_SIZE(n,blksize) ((((blksize) + ((OS_ALIGNMENT)-1)) / (OS_ALIGNMENT)) * (n))
/** Calculates the number of bytes required to initialize a memory pool. */
#define OS_MEMPOOL_BYTES(n,blksize) \
@@ -190,6 +201,17 @@
uint32_t block_size, void *membuf, char *name);
/**
+ * Removes the specified mempool from the list of initialized mempools.
+ *
+ * @param mp The mempool to unregister.
+ *
+ * @return 0 on success;
+ * OS_INVALID_PARM if the mempool is not
+ * registered.
+ */
+os_error_t os_mempool_unregister(struct os_mempool *mp);
+
+/**
* Clears a memory pool.
*
* @param mp The mempool to clear.
diff --git a/porting/nimble/include/os/os_trace_api.h b/porting/nimble/include/os/os_trace_api.h
index 4f1aa03..d9fc66d 100644
--- a/porting/nimble/include/os/os_trace_api.h
+++ b/porting/nimble/include/os/os_trace_api.h
@@ -20,7 +20,21 @@
#ifndef OS_TRACE_API_H
#define OS_TRACE_API_H
-#include <stdint.h>
+#ifdef __ASSEMBLER__
+
+#define os_trace_isr_enter SEGGER_SYSVIEW_RecordEnterISR
+#define os_trace_isr_exit SEGGER_SYSVIEW_RecordExitISR
+#define os_trace_task_start_exec SEGGER_SYSVIEW_OnTaskStartExec
+
+#else
+
+#include <stdio.h>
+#include <string.h>
+#include "syscfg/syscfg.h"
+#if MYNEWT_VAL(OS_SYSVIEW)
+#include "sysview/vendor/SEGGER_SYSVIEW.h"
+#endif
+#include "os/os.h"
#define OS_TRACE_ID_EVENTQ_PUT (40)
#define OS_TRACE_ID_EVENTQ_GET_NO_WAIT (41)
@@ -34,6 +48,163 @@
#define OS_TRACE_ID_SEM_INIT (60)
#define OS_TRACE_ID_SEM_RELEASE (61)
#define OS_TRACE_ID_SEM_PEND (62)
+#define OS_TRACE_ID_CALLOUT_INIT (70)
+#define OS_TRACE_ID_CALLOUT_STOP (71)
+#define OS_TRACE_ID_CALLOUT_RESET (72)
+#define OS_TRACE_ID_CALLOUT_TICK (73)
+#define OS_TRACE_ID_MEMBLOCK_GET (80)
+#define OS_TRACE_ID_MEMBLOCK_PUT_FROM_CB (81)
+#define OS_TRACE_ID_MEMBLOCK_PUT (82)
+#define OS_TRACE_ID_MBUF_GET (90)
+#define OS_TRACE_ID_MBUF_GET_PKTHDR (91)
+#define OS_TRACE_ID_MBUF_FREE (92)
+#define OS_TRACE_ID_MBUF_FREE_CHAIN (93)
+
+#if MYNEWT_VAL(OS_SYSVIEW)
+
+typedef struct SEGGER_SYSVIEW_MODULE_STRUCT os_trace_module_t;
+
+static inline uint32_t
+os_trace_module_register(os_trace_module_t *m, const char *name,
+ uint32_t num_events, void (* send_desc_func)(void))
+{
+ char *desc = "M=???";
+
+ asprintf(&desc, "M=%s", name);
+
+ memset(m, 0, sizeof(*m));
+ m->sModule = desc;
+ m->NumEvents = num_events;
+ m->pfSendModuleDesc = send_desc_func;
+
+ SEGGER_SYSVIEW_RegisterModule(m);
+
+ return m->EventOffset;
+}
+
+static inline void
+os_trace_module_desc(const os_trace_module_t *m, const char *desc)
+{
+ SEGGER_SYSVIEW_RecordModuleDescription(m, desc);
+}
+
+static inline void
+os_trace_isr_enter(void)
+{
+ SEGGER_SYSVIEW_RecordEnterISR();
+}
+
+static inline void
+os_trace_isr_exit(void)
+{
+ SEGGER_SYSVIEW_RecordExitISR();
+}
+
+static inline void
+os_trace_task_info(const struct ble_npl_task *t)
+{
+ SEGGER_SYSVIEW_TASKINFO ti;
+
+ ti.TaskID = (uint32_t)t;
+ ti.sName = t->t_name;
+ ti.Prio = t->t_prio;
+ ti.StackSize = t->t_stacksize * sizeof(os_stack_t);
+ ti.StackBase = (uint32_t)&t->t_stackbottom + ti.StackSize;
+
+ SEGGER_SYSVIEW_SendTaskInfo(&ti);
+}
+
+static inline void
+os_trace_task_create(const struct ble_npl_task *t)
+{
+ SEGGER_SYSVIEW_OnTaskCreate((uint32_t)t);
+}
+
+static inline void
+os_trace_task_start_exec(const struct ble_npl_task *t)
+{
+ SEGGER_SYSVIEW_OnTaskStartExec((uint32_t)t);
+}
+
+static inline void
+os_trace_task_stop_exec(void)
+{
+ SEGGER_SYSVIEW_OnTaskStopExec();
+}
+
+static inline void
+os_trace_task_start_ready(const struct ble_npl_task *t)
+{
+ SEGGER_SYSVIEW_OnTaskStartReady((uint32_t)t);
+}
+
+static inline void
+os_trace_task_stop_ready(const struct ble_npl_task *t, unsigned reason)
+{
+ SEGGER_SYSVIEW_OnTaskStopReady((uint32_t)t, reason);
+}
+
+static inline void
+os_trace_idle(void)
+{
+ SEGGER_SYSVIEW_OnIdle();
+}
+
+static inline void
+os_trace_user_start(unsigned id)
+{
+ SEGGER_SYSVIEW_OnUserStart(id);
+}
+
+static inline void
+os_trace_user_stop(unsigned id)
+{
+ SEGGER_SYSVIEW_OnUserStop(id);
+}
+
+#endif /* MYNEWT_VAL(OS_SYSVIEW) */
+
+#if MYNEWT_VAL(OS_SYSVIEW) && !defined(OS_TRACE_DISABLE_FILE_API)
+
+static inline void
+os_trace_api_void(unsigned id)
+{
+ SEGGER_SYSVIEW_RecordVoid(id);
+}
+
+static inline void
+os_trace_api_u32(unsigned id, uint32_t p0)
+{
+ SEGGER_SYSVIEW_RecordU32(id, p0);
+}
+
+static inline void
+os_trace_api_u32x2(unsigned id, uint32_t p0, uint32_t p1)
+{
+ SEGGER_SYSVIEW_RecordU32x2(id, p0, p1);
+}
+
+static inline void
+os_trace_api_u32x3(unsigned id, uint32_t p0, uint32_t p1, uint32_t p2)
+{
+ SEGGER_SYSVIEW_RecordU32x3(id, p0, p1, p2);
+}
+
+static inline void
+os_trace_api_ret(unsigned id)
+{
+ SEGGER_SYSVIEW_RecordEndCall(id);
+}
+
+static inline void
+os_trace_api_ret_u32(unsigned id, uint32_t ret)
+{
+ SEGGER_SYSVIEW_RecordEndCallU32(id, ret);
+}
+
+#endif /* MYNEWT_VAL(OS_SYSVIEW) && !defined(OS_TRACE_DISABLE_FILE_API) */
+
+#if !MYNEWT_VAL(OS_SYSVIEW)
static inline void
os_trace_isr_enter(void)
@@ -46,11 +217,30 @@
}
static inline void
+os_trace_task_stop_exec(void)
+{
+}
+
+static inline void
os_trace_idle(void)
{
}
static inline void
+os_trace_user_start(unsigned id)
+{
+}
+
+static inline void
+os_trace_user_stop(unsigned id)
+{
+}
+
+#endif /* !MYNEWT_VAL(OS_SYSVIEW) */
+
+#if !MYNEWT_VAL(OS_SYSVIEW) || defined(OS_TRACE_DISABLE_FILE_API)
+
+static inline void
os_trace_api_void(unsigned id)
{
}
@@ -80,4 +270,8 @@
{
}
-#endif
+#endif /* !MYNEWT_VAL(OS_SYSVIEW) || defined(OS_TRACE_DISABLE_FILE_API) */
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* OS_TRACE_API_H */
diff --git a/porting/nimble/include/os/queue.h b/porting/nimble/include/os/queue.h
index faffd85..c19edb1 100644
--- a/porting/nimble/include/os/queue.h
+++ b/porting/nimble/include/os/queue.h
@@ -118,12 +118,12 @@
#define SLIST_HEAD_INITIALIZER(head) \
{ NULL }
-
+
#define SLIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
-
+
/*
* Singly-linked List functions.
*/
@@ -519,4 +519,4 @@
}
#endif
-#endif /* !_QUEUE_H_ */
+#endif /* !_SYS_QUEUE_H_ */
diff --git a/porting/nimble/src/endian.c b/porting/nimble/src/endian.c
index 2afd6a2..1f5db2e 100644
--- a/porting/nimble/src/endian.c
+++ b/porting/nimble/src/endian.c
@@ -145,9 +145,9 @@
uint8_t *u8ptr;
u8ptr = buf;
- u8ptr[0] = (uint8_t)(x >> 24);
- u8ptr[1] = (uint8_t)(x >> 16);
- u8ptr[2] = (uint8_t)(x >> 8);
+ u8ptr[0] = (uint8_t)(x >> 16);
+ u8ptr[1] = (uint8_t)(x >> 8);
+ u8ptr[2] = (uint8_t)x;
}
void
@@ -198,9 +198,9 @@
uint32_t x;
u8ptr = buf;
- x = (uint32_t)u8ptr[0] << 24;
- x |= (uint32_t)u8ptr[1] << 16;
- x |= (uint32_t)u8ptr[2] << 8;
+ x = (uint32_t)u8ptr[0] << 16;
+ x |= (uint32_t)u8ptr[1] << 8;
+ x |= (uint32_t)u8ptr[2];
return x;
}
diff --git a/porting/nimble/src/hal_timer.c b/porting/nimble/src/hal_timer.c
index c67986a..568ab0c 100644
--- a/porting/nimble/src/hal_timer.c
+++ b/porting/nimble/src/hal_timer.c
@@ -24,6 +24,7 @@
#include "os/os.h"
#include "nrfx.h"
#include "hal/hal_timer.h"
+#include "os/os_trace_api.h"
/* IRQ prototype */
typedef void (*hal_timer_irq_handler_t)(void);
@@ -74,34 +75,34 @@
static const struct nrf52_hal_timer *nrf52_hal_timers[NRF52_HAL_TIMER_MAX] = {
#if MYNEWT_VAL(TIMER_0)
- &nrf52_hal_timer0,
+ &nrf52_hal_timer0,
#else
- NULL,
+ NULL,
#endif
#if MYNEWT_VAL(TIMER_1)
- &nrf52_hal_timer1,
+ &nrf52_hal_timer1,
#else
- NULL,
+ NULL,
#endif
#if MYNEWT_VAL(TIMER_2)
- &nrf52_hal_timer2,
+ &nrf52_hal_timer2,
#else
- NULL,
+ NULL,
#endif
#if MYNEWT_VAL(TIMER_3)
- &nrf52_hal_timer3,
+ &nrf52_hal_timer3,
#else
- NULL,
+ NULL,
#endif
#if MYNEWT_VAL(TIMER_4)
- &nrf52_hal_timer4,
+ &nrf52_hal_timer4,
#else
- NULL,
+ NULL,
#endif
#if MYNEWT_VAL(TIMER_5)
- &nrf52_hal_timer5
+ &nrf52_hal_timer5
#else
- NULL
+ NULL
#endif
};
@@ -163,14 +164,26 @@
delta_t = (int32_t)(expiry - temp);
/*
- * The nrf documentation states that you must set the output
- * compare to 2 greater than the counter to guarantee an interrupt.
- * Since the counter can tick once while we check, we make sure
- * it is greater than 2.
+ * The nRF52xxx documentation states that COMPARE event is guaranteed
+ * only if value written to CC register is at least 2 greater than the
+ * current counter value. We also need to account for possible extra
+ * tick during calculations so effectively any delta less than 3 needs
+ * to be handled differently. TICK event is used to have interrupt on
+ * each subsequent tick so we won't miss any and in case we detected
+ * mentioned extra tick during calculations, interrupt is triggered
+ * immediately. Delta 0 or less means we should always fire immediately.
*/
- if (delta_t < 3) {
+ if (delta_t < 1) {
+ rtctimer->INTENCLR = RTC_INTENCLR_TICK_Msk;
NVIC_SetPendingIRQ(bsptimer->tmr_irq_num);
- } else {
+ } else if (delta_t < 3) {
+ rtctimer->INTENSET = RTC_INTENSET_TICK_Msk;
+ if (rtctimer->COUNTER != cntr) {
+ NVIC_SetPendingIRQ(bsptimer->tmr_irq_num);
+ }
+ } else {
+ rtctimer->INTENCLR = RTC_INTENCLR_TICK_Msk;
+
if (delta_t < (1UL << 24)) {
rtctimer->CC[NRF_RTC_TIMER_CC_INT] = expiry & 0x00ffffff;
} else {
@@ -212,6 +225,7 @@
nrf_rtc_disable_ocmp(NRF_RTC_Type *rtctimer)
{
rtctimer->INTENCLR = NRF_TIMER_INT_MASK(NRF_RTC_TIMER_CC_INT);
+ rtctimer->INTENCLR = RTC_INTENCLR_TICK_Msk;
}
static uint32_t
@@ -250,7 +264,6 @@
static void
hal_timer_chk_queue(struct nrf52_hal_timer *bsptimer)
{
- int32_t delta;
uint32_t tcntr;
os_sr_t sr;
struct hal_timer *timer;
@@ -260,16 +273,10 @@
while ((timer = TAILQ_FIRST(&bsptimer->hal_timer_q)) != NULL) {
if (bsptimer->tmr_rtc) {
tcntr = hal_timer_read_bsptimer(bsptimer);
- /*
- * If we are within 3 ticks of RTC, we wont be able to set compare.
- * Thus, we have to service this timer early.
- */
- delta = -3;
} else {
tcntr = nrf_read_timer_cntr(bsptimer->tmr_reg);
- delta = 0;
}
- if ((int32_t)(tcntr - timer->expiry) >= delta) {
+ if ((int32_t)(tcntr - timer->expiry) >= 0) {
TAILQ_REMOVE(&bsptimer->hal_timer_q, timer, link);
timer->link.tqe_prev = NULL;
timer->cb_func(timer->cb_arg);
@@ -315,7 +322,7 @@
uint32_t compare;
NRF_TIMER_Type *hwtimer;
- os_trace_enter_isr();
+ os_trace_isr_enter();
/* Check interrupt source. If set, clear them */
hwtimer = bsptimer->tmr_reg;
@@ -343,7 +350,7 @@
compare = hwtimer->EVENTS_COMPARE[NRF_TIMER_CC_INT];
}
- os_trace_exit_isr();
+ os_trace_isr_exit();
}
#endif
@@ -353,13 +360,21 @@
{
uint32_t overflow;
uint32_t compare;
+ uint32_t tick;
NRF_RTC_Type *rtctimer;
+ os_trace_isr_enter();
+
/* Check interrupt source. If set, clear them */
rtctimer = (NRF_RTC_Type *)bsptimer->tmr_reg;
compare = rtctimer->EVENTS_COMPARE[NRF_RTC_TIMER_CC_INT];
if (compare) {
- rtctimer->EVENTS_COMPARE[NRF_RTC_TIMER_CC_INT] = 0;
+ rtctimer->EVENTS_COMPARE[NRF_RTC_TIMER_CC_INT] = 0;
+ }
+
+ tick = rtctimer->EVENTS_TICK;
+ if (tick) {
+ rtctimer->EVENTS_TICK = 0;
}
overflow = rtctimer->EVENTS_OVRFLW;
@@ -384,6 +399,8 @@
/* Recommended by nordic to make sure interrupts are cleared */
compare = rtctimer->EVENTS_COMPARE[NRF_RTC_TIMER_CC_INT];
+
+ os_trace_isr_exit();
}
#endif
@@ -463,17 +480,52 @@
}
switch (timer_num) {
-#if MYNEWT_VAL(TIMER_5)
- case 5:
- irq_num = RTC0_IRQn;
- hwtimer = NRF_RTC0;
- irq_isr = nrf52_timer5_irq_handler;
- bsptimer->tmr_rtc = 1;
+#if MYNEWT_VAL(TIMER_0)
+ case 0:
+ irq_num = TIMER0_IRQn;
+ hwtimer = NRF_TIMER0;
+ irq_isr = nrf52_timer0_irq_handler;
break;
#endif
- default:
- hwtimer = NULL;
+#if MYNEWT_VAL(TIMER_1)
+ case 1:
+ irq_num = TIMER1_IRQn;
+ hwtimer = NRF_TIMER1;
+ irq_isr = nrf52_timer1_irq_handler;
break;
+#endif
+#if MYNEWT_VAL(TIMER_2)
+ case 2:
+ irq_num = TIMER2_IRQn;
+ hwtimer = NRF_TIMER2;
+ irq_isr = nrf52_timer2_irq_handler;
+ break;
+#endif
+#if MYNEWT_VAL(TIMER_3)
+ case 3:
+ irq_num = TIMER3_IRQn;
+ hwtimer = NRF_TIMER3;
+ irq_isr = nrf52_timer3_irq_handler;
+ break;
+#endif
+#if MYNEWT_VAL(TIMER_4)
+ case 4:
+ irq_num = TIMER4_IRQn;
+ hwtimer = NRF_TIMER4;
+ irq_isr = nrf52_timer4_irq_handler;
+ break;
+#endif
+#if MYNEWT_VAL(TIMER_5)
+ case 5:
+ irq_num = RTC0_IRQn;
+ hwtimer = NRF_RTC0;
+ irq_isr = nrf52_timer5_irq_handler;
+ bsptimer->tmr_rtc = 1;
+ break;
+#endif
+ default:
+ hwtimer = NULL;
+ break;
}
if (hwtimer == NULL) {
@@ -486,18 +538,12 @@
/* Disable IRQ, set priority and set vector in table */
NVIC_DisableIRQ(irq_num);
-#ifndef RIOT_VERSION
NVIC_SetPriority(irq_num, (1 << __NVIC_PRIO_BITS) - 1);
-#endif
-#if MYNEWT
NVIC_SetVector(irq_num, (uint32_t)irq_isr);
-#else
- ble_npl_hw_set_isr(irq_num, irq_isr);
-#endif
return 0;
-err:
+ err:
return rc;
}
@@ -515,8 +561,13 @@
hal_timer_config(int timer_num, uint32_t freq_hz)
{
int rc;
+ uint8_t prescaler;
os_sr_t sr;
+ uint32_t div;
+ uint32_t min_delta;
+ uint32_t max_delta;
struct nrf52_hal_timer *bsptimer;
+ NRF_TIMER_Type *hwtimer;
#if MYNEWT_VAL(TIMER_5)
NRF_RTC_Type *rtctimer;
#endif
@@ -541,6 +592,7 @@
/* Stop the timer first */
rtctimer->TASKS_STOP = 1;
+ rtctimer->TASKS_CLEAR = 1;
/* Always no prescaler */
rtctimer->PRESCALER = 0;
@@ -560,11 +612,70 @@
}
#endif
- assert(0);
+ /* Set timer to desired frequency */
+ div = NRF52_MAX_TIMER_FREQ / freq_hz;
+
+ /*
+ * Largest prescaler is 2^9 and must make sure frequency not too high.
+ * If hwtimer is NULL it means that the timer was not initialized prior
+ * to call.
+ */
+ if (bsptimer->tmr_enabled || (div == 0) || (div > 512) ||
+ (bsptimer->tmr_reg == NULL)) {
+ rc = EINVAL;
+ goto err;
+ }
+
+ if (div == 1) {
+ prescaler = 0;
+ } else {
+ /* Find closest prescaler */
+ for (prescaler = 1; prescaler < 10; ++prescaler) {
+ if (div <= (1 << prescaler)) {
+ min_delta = div - (1 << (prescaler - 1));
+ max_delta = (1 << prescaler) - div;
+ if (min_delta < max_delta) {
+ prescaler -= 1;
+ }
+ break;
+ }
+ }
+ }
+
+ /* Now set the actual frequency */
+ bsptimer->tmr_freq = NRF52_MAX_TIMER_FREQ / (1 << prescaler);
+ bsptimer->tmr_enabled = 1;
+
+ /* disable interrupts */
+ OS_ENTER_CRITICAL(sr);
+
+#if MYNEWT_VAL_CHOICE(MCU_HFCLK_SOURCE, HFXO)
+ /* Make sure HFXO is started */
+ nrf52_clock_hfxo_request();
+#endif
+ hwtimer = bsptimer->tmr_reg;
+
+ /* Stop the timer first */
+ hwtimer->TASKS_STOP = 1;
+ hwtimer->TASKS_CLEAR = 1;
+
+ /* Put the timer in timer mode using 32 bits. */
+ hwtimer->MODE = TIMER_MODE_MODE_Timer;
+ hwtimer->BITMODE = TIMER_BITMODE_BITMODE_32Bit;
+
+ /* Set the prescaler */
+ hwtimer->PRESCALER = prescaler;
+
+ /* Start the timer */
+ hwtimer->TASKS_START = 1;
+
+ NVIC_EnableIRQ(bsptimer->tmr_irq_num);
+
+ OS_EXIT_CRITICAL(sr);
return 0;
-err:
+ err:
return rc;
}
@@ -597,13 +708,19 @@
} else {
hwtimer = (NRF_TIMER_Type *)bsptimer->tmr_reg;
hwtimer->INTENCLR = NRF_TIMER_INT_MASK(NRF_TIMER_CC_INT);
- hwtimer->TASKS_STOP = 1;
+ hwtimer->TASKS_SHUTDOWN = 1;
}
bsptimer->tmr_enabled = 0;
bsptimer->tmr_reg = NULL;
+
+#if MYNEWT_VAL_CHOICE(MCU_HFCLK_SOURCE, HFXO)
+ if (timer_num != 5) {
+ nrf52_clock_hfxo_release();
+ }
+#endif
OS_EXIT_CRITICAL(sr);
-err:
+ err:
return rc;
}
@@ -628,7 +745,7 @@
resolution = 1000000000 / bsptimer->tmr_freq;
return resolution;
-err:
+ err:
rc = 0;
return rc;
}
@@ -659,7 +776,7 @@
return tcntr;
/* Assert here since there is no invalid return code */
-err:
+ err:
assert(0);
rc = 0;
return rc;
@@ -713,7 +830,7 @@
rc = 0;
-err:
+ err:
return rc;
}
@@ -796,7 +913,7 @@
return EINVAL;
}
- bsptimer = (struct nrf52_hal_timer *)timer->bsp_timer;
+ bsptimer = (struct nrf52_hal_timer *)timer->bsp_timer;
OS_ENTER_CRITICAL(sr);
diff --git a/porting/nimble/src/mem.c b/porting/nimble/src/mem.c
index 1fea7df..6bd6eec 100644
--- a/porting/nimble/src/mem.c
+++ b/porting/nimble/src/mem.c
@@ -241,7 +241,7 @@
* frag = mem_split_frag(&rsp, get_mtu(), frag_alloc, NULL);
* if (frag == NULL) {
* os_mbuf_free_chain(rsp);
- * return SYS_ENOMEM;
+ * return OS_ENOMEM;
* }
* send_packet(frag)
* }
@@ -300,3 +300,25 @@
os_mbuf_free_chain(frag);
return NULL;
}
+
+/**
+ * Applies a pullup operation to the supplied mbuf and returns a pointer to the
+ * start of the mbuf data. This is simply a convenience function which allows
+ * the user to access the mbuf data without a cast. On failure, the provided
+ * mbuf is freed.
+ *
+ * @param om The mbuf to pull up.
+ * @param len The size of the object to pull up.
+ *
+ * @return The start of the pulled-up mbuf data.
+ */
+void *
+mem_pullup_obj(struct os_mbuf **om, uint16_t len)
+{
+ *om = os_mbuf_pullup(*om, len);
+ if (*om == NULL) {
+ return NULL;
+ }
+
+ return (*om)->om_data;
+}
\ No newline at end of file
diff --git a/porting/nimble/src/os_cputime_pwr2.c b/porting/nimble/src/os_cputime_pwr2.c
index 1567070..8f820a6 100644
--- a/porting/nimble/src/os_cputime_pwr2.c
+++ b/porting/nimble/src/os_cputime_pwr2.c
@@ -79,25 +79,11 @@
{
uint32_t usecs;
uint32_t shift;
- uint32_t freq;
- /* Given: `freq = 2^n`, calculate `n`. */
- /* Note: this looks like a lot of work, but gcc can optimize it away since
- * `freq` is known at compile time.
- */
- freq = MYNEWT_VAL(OS_CPUTIME_FREQ);
- shift = 0;
- while (freq != 0) {
- freq >>= 1;
- shift++;
- }
+ shift = __builtin_popcount(MYNEWT_VAL(OS_CPUTIME_FREQ) - 1) - 6;
- if (shift <= 7) {
- return 0;
- }
- shift -= 7;
-
- usecs = ((ticks >> shift) * 15625) + (((ticks & 0x1ff) * 15625) >> shift);
+ usecs = ((ticks >> shift) * 15625) +
+ (((ticks & ~(~0U << shift)) * 15625) >> shift);
return usecs;
}
diff --git a/porting/nimble/src/os_mbuf.c b/porting/nimble/src/os_mbuf.c
index 92ea935..4ce0cef 100644
--- a/porting/nimble/src/os_mbuf.c
+++ b/porting/nimble/src/os_mbuf.c
@@ -34,6 +34,7 @@
*/
#include "os/os.h"
+#include "os/os_trace_api.h"
#include <assert.h>
#include <stddef.h>
@@ -243,13 +244,17 @@
{
struct os_mbuf *om;
+ os_trace_api_u32x2(OS_TRACE_ID_MBUF_GET, (uint32_t)omp,
+ (uint32_t)leadingspace);
+
if (leadingspace > omp->omp_databuf_len) {
- goto err;
+ om = NULL;
+ goto done;
}
om = os_memblock_get(omp->omp_pool);
if (!om) {
- goto err;
+ goto done;
}
SLIST_NEXT(om, om_next) = NULL;
@@ -259,9 +264,9 @@
om->om_data = (&om->om_databuf[0] + leadingspace);
om->om_omp = omp;
- return (om);
-err:
- return (NULL);
+done:
+ os_trace_api_ret_u32(OS_TRACE_ID_MBUF_GET, (uint32_t)om);
+ return om;
}
struct os_mbuf *
@@ -271,10 +276,14 @@
struct os_mbuf_pkthdr *pkthdr;
struct os_mbuf *om;
+ os_trace_api_u32x2(OS_TRACE_ID_MBUF_GET_PKTHDR, (uint32_t)omp,
+ (uint32_t)user_pkthdr_len);
+
/* User packet header must fit inside mbuf */
pkthdr_len = user_pkthdr_len + sizeof(struct os_mbuf_pkthdr);
if ((pkthdr_len > omp->omp_databuf_len) || (pkthdr_len > 255)) {
- return NULL;
+ om = NULL;
+ goto done;
}
om = os_mbuf_get(omp, 0);
@@ -288,6 +297,8 @@
STAILQ_NEXT(pkthdr, omp_next) = NULL;
}
+done:
+ os_trace_api_ret_u32(OS_TRACE_ID_MBUF_GET_PKTHDR, (uint32_t)om);
return om;
}
@@ -296,15 +307,19 @@
{
int rc;
+ os_trace_api_u32(OS_TRACE_ID_MBUF_FREE, (uint32_t)om);
+
if (om->om_omp != NULL) {
rc = os_memblock_put(om->om_omp->omp_pool, om);
if (rc != 0) {
- goto err;
+ goto done;
}
}
- return (0);
-err:
+ rc = 0;
+
+done:
+ os_trace_api_ret_u32(OS_TRACE_ID_MBUF_FREE, (uint32_t)rc);
return (rc);
}
@@ -314,19 +329,23 @@
struct os_mbuf *next;
int rc;
+ os_trace_api_u32(OS_TRACE_ID_MBUF_FREE_CHAIN, (uint32_t)om);
+
while (om != NULL) {
next = SLIST_NEXT(om, om_next);
rc = os_mbuf_free(om);
if (rc != 0) {
- goto err;
+ goto done;
}
om = next;
}
- return (0);
-err:
+ rc = 0;
+
+done:
+ os_trace_api_ret_u32(OS_TRACE_ID_MBUF_FREE_CHAIN, (uint32_t)rc);
return (rc);
}
@@ -348,6 +367,20 @@
new_buf->om_data = new_buf->om_databuf + old_buf->om_pkthdr_len;
}
+uint16_t
+os_mbuf_len(const struct os_mbuf *om)
+{
+ uint16_t len;
+
+ len = 0;
+ while (om != NULL) {
+ len += om->om_len;
+ om = SLIST_NEXT(om, om_next);
+ }
+
+ return len;
+}
+
int
os_mbuf_append(struct os_mbuf *om, const void *data, uint16_t len)
{
@@ -1037,6 +1070,93 @@
return om;
}
+int
+os_mbuf_widen(struct os_mbuf *om, uint16_t off, uint16_t len)
+{
+ struct os_mbuf *first_new;
+ struct os_mbuf *edge_om;
+ struct os_mbuf *prev;
+ struct os_mbuf *cur;
+ uint16_t rem_len;
+ uint16_t sub_off;
+ int rc;
+
+ /* Locate the mbuf and offset within the chain where the gap will be
+ * inserted.
+ */
+ edge_om = os_mbuf_off(om, off, &sub_off);
+ if (edge_om == NULL) {
+ return OS_EINVAL;
+ }
+
+ /* If the mbuf has sufficient capacity for the gap, just make room within
+ * the mbuf.
+ */
+ if (OS_MBUF_TRAILINGSPACE(edge_om) >= len) {
+ memmove(edge_om->om_data + sub_off + len,
+ edge_om->om_data + sub_off,
+ edge_om->om_len - sub_off);
+ edge_om->om_len += len;
+ if (OS_MBUF_IS_PKTHDR(om)) {
+ OS_MBUF_PKTHDR(om)->omp_len += len;
+ }
+ return 0;
+ }
+
+ /* Otherwise, allocate new mbufs until the chain has sufficient capacity
+ * for the gap.
+ */
+ rem_len = len;
+ first_new = NULL;
+ prev = NULL;
+ while (rem_len > 0) {
+ cur = os_mbuf_get(om->om_omp, 0);
+ if (cur == NULL) {
+ /* Free only the mbufs that this function allocated. */
+ os_mbuf_free_chain(first_new);
+ return OS_ENOMEM;
+ }
+
+ /* Remember the start of the chain of new mbufs. */
+ if (first_new == NULL) {
+ first_new = cur;
+ }
+
+ if (rem_len > OS_MBUF_TRAILINGSPACE(cur)) {
+ cur->om_len = OS_MBUF_TRAILINGSPACE(cur);
+ } else {
+ cur->om_len = rem_len;
+ }
+ rem_len -= cur->om_len;
+
+ if (prev != NULL) {
+ SLIST_NEXT(prev, om_next) = cur;
+ }
+ prev = cur;
+ }
+
+ /* Move the misplaced data from the edge mbuf over to the right side of the
+ * gap.
+ */
+ rc = os_mbuf_append(prev, edge_om->om_data + sub_off,
+ edge_om->om_len - sub_off);
+ if (rc != 0) {
+ os_mbuf_free_chain(first_new);
+ return OS_ENOMEM;
+ }
+ edge_om->om_len = sub_off;
+
+ /* Insert the gap into the chain. */
+ SLIST_NEXT(prev, om_next) = SLIST_NEXT(edge_om, om_next);
+ SLIST_NEXT(edge_om, om_next) = first_new;
+
+ if (OS_MBUF_IS_PKTHDR(om)) {
+ OS_MBUF_PKTHDR(om)->omp_len += len;
+ }
+
+ return 0;
+}
+
struct os_mbuf *
os_mbuf_pack_chains(struct os_mbuf *m1, struct os_mbuf *m2)
{
@@ -1122,4 +1242,4 @@
}
return m1;
-}
+}
\ No newline at end of file
diff --git a/porting/nimble/src/os_mempool.c b/porting/nimble/src/os_mempool.c
index 6a9d218..1b174ff 100644
--- a/porting/nimble/src/os_mempool.c
+++ b/porting/nimble/src/os_mempool.c
@@ -18,54 +18,111 @@
*/
#include "os/os.h"
+#include "os/os_trace_api.h"
#include <string.h>
#include <assert.h>
#include <stdbool.h>
+#include "syscfg/syscfg.h"
+#if !MYNEWT_VAL(OS_SYSVIEW_TRACE_MEMPOOL)
+#define OS_TRACE_DISABLE_FILE_API
+#endif
#define OS_MEM_TRUE_BLOCK_SIZE(bsize) OS_ALIGN(bsize, OS_ALIGNMENT)
+#if MYNEWT_VAL(OS_MEMPOOL_GUARD)
+#define OS_MEMPOOL_TRUE_BLOCK_SIZE(mp) \
+ (((mp)->mp_flags & OS_MEMPOOL_F_EXT) ? \
+ OS_MEM_TRUE_BLOCK_SIZE(mp->mp_block_size) : \
+ (OS_MEM_TRUE_BLOCK_SIZE(mp->mp_block_size) + sizeof(os_membuf_t)))
+#else
#define OS_MEMPOOL_TRUE_BLOCK_SIZE(mp) OS_MEM_TRUE_BLOCK_SIZE(mp->mp_block_size)
+#endif
-STAILQ_HEAD(, os_mempool) g_os_mempool_list =
- STAILQ_HEAD_INITIALIZER(g_os_mempool_list);
+STAILQ_HEAD(, os_mempool) g_os_mempool_list;
#if MYNEWT_VAL(OS_MEMPOOL_POISON)
static uint32_t os_mem_poison = 0xde7ec7ed;
-static void
-os_mempool_poison(void *start, int sz)
-{
- int i;
- char *p = start;
+static_assert(sizeof(struct os_memblock) % 4 == 0, "sizeof(struct os_memblock) shall be aligned to 4");
+static_assert(sizeof(os_mem_poison) == 4, "sizeof(os_mem_poison) shall be 4");
- for (i = sizeof(struct os_memblock); i < sz;
- i = i + sizeof(os_mem_poison)) {
- memcpy(p + i, &os_mem_poison, min(sizeof(os_mem_poison), sz - i));
+static void
+os_mempool_poison(const struct os_mempool *mp, void *start)
+{
+ uint32_t *p;
+ uint32_t *end;
+ int sz;
+
+ sz = OS_MEM_TRUE_BLOCK_SIZE(mp->mp_block_size);
+ p = start;
+ end = p + sz / 4;
+ p += sizeof(struct os_memblock) / 4;
+
+ while (p < end) {
+ *p = os_mem_poison;
+ p++;
}
}
static void
-os_mempool_poison_check(void *start, int sz)
+os_mempool_poison_check(const struct os_mempool *mp, void *start)
{
- int i;
- char *p = start;
+ uint32_t *p;
+ uint32_t *end;
+ int sz;
- for (i = sizeof(struct os_memblock); i < sz;
- i = i + sizeof(os_mem_poison)) {
- assert(!memcmp(p + i, &os_mem_poison,
- min(sizeof(os_mem_poison), sz - i)));
+ sz = OS_MEM_TRUE_BLOCK_SIZE(mp->mp_block_size);
+ p = start;
+ end = p + sz / 4;
+ p += sizeof(struct os_memblock) / 4;
+
+ while (p < end) {
+ assert(*p == os_mem_poison);
+ p++;
}
}
#else
-#define os_mempool_poison(start, sz)
-#define os_mempool_poison_check(start, sz)
+#define os_mempool_poison(mp, start)
+#define os_mempool_poison_check(mp, start)
+#endif
+#if MYNEWT_VAL(OS_MEMPOOL_GUARD)
+#define OS_MEMPOOL_GUARD_PATTERN 0xBAFF1ED1
+
+static void
+os_mempool_guard(const struct os_mempool *mp, void *start)
+{
+ uint32_t *tgt;
+
+ if ((mp->mp_flags & OS_MEMPOOL_F_EXT) == 0) {
+ tgt = (uint32_t *)((uintptr_t)start +
+ OS_MEM_TRUE_BLOCK_SIZE(mp->mp_block_size));
+ *tgt = OS_MEMPOOL_GUARD_PATTERN;
+ }
+}
+
+static void
+os_mempool_guard_check(const struct os_mempool *mp, void *start)
+{
+ uint32_t *tgt;
+
+ if ((mp->mp_flags & OS_MEMPOOL_F_EXT) == 0) {
+ tgt = (uint32_t *)((uintptr_t)start +
+ OS_MEM_TRUE_BLOCK_SIZE(mp->mp_block_size));
+ assert(*tgt == OS_MEMPOOL_GUARD_PATTERN);
+ }
+}
+#else
+#define os_mempool_guard(mp, start)
+#define os_mempool_guard_check(mp, start)
#endif
-os_error_t
-os_mempool_init(struct os_mempool *mp, uint16_t blocks, uint32_t block_size,
- void *membuf, char *name)
+static os_error_t
+os_mempool_init_internal(struct os_mempool *mp, uint16_t blocks,
+ uint32_t block_size, void *membuf, char *name,
+ uint8_t flags)
{
int true_block_size;
+ int i;
uint8_t *block_addr;
struct os_memblock *block_ptr;
@@ -82,36 +139,40 @@
/* Blocks need to be sized properly and memory buffer should be
* aligned
*/
- if (((uintptr_t)membuf & (OS_ALIGNMENT - 1)) != 0) {
+ if (((uint32_t)(uintptr_t)membuf & (OS_ALIGNMENT - 1)) != 0) {
return OS_MEM_NOT_ALIGNED;
}
}
- true_block_size = OS_MEM_TRUE_BLOCK_SIZE(block_size);
/* Initialize the memory pool structure */
mp->mp_block_size = block_size;
mp->mp_num_free = blocks;
mp->mp_min_free = blocks;
- mp->mp_flags = 0;
+ mp->mp_flags = flags;
mp->mp_num_blocks = blocks;
- mp->mp_membuf_addr = (uintptr_t)membuf;
+ mp->mp_membuf_addr = (uint32_t)(uintptr_t)membuf;
mp->name = name;
- os_mempool_poison(membuf, true_block_size);
SLIST_FIRST(mp) = membuf;
- /* Chain the memory blocks to the free list */
- block_addr = (uint8_t *)membuf;
- block_ptr = (struct os_memblock *)block_addr;
- while (blocks > 1) {
- block_addr += true_block_size;
- os_mempool_poison(block_addr, true_block_size);
- SLIST_NEXT(block_ptr, mb_next) = (struct os_memblock *)block_addr;
- block_ptr = (struct os_memblock *)block_addr;
- --blocks;
- }
+ if (blocks > 0) {
+ os_mempool_poison(mp, membuf);
+ os_mempool_guard(mp, membuf);
+ true_block_size = OS_MEMPOOL_TRUE_BLOCK_SIZE(mp);
- /* Last one in the list should be NULL */
- SLIST_NEXT(block_ptr, mb_next) = NULL;
+ /* Chain the memory blocks to the free list */
+ block_addr = (uint8_t *)membuf;
+ block_ptr = (struct os_memblock *)block_addr;
+ for (i = 1; i < blocks; i++) {
+ block_addr += true_block_size;
+ os_mempool_poison(mp, block_addr);
+ os_mempool_guard(mp, block_addr);
+ SLIST_NEXT(block_ptr, mb_next) = (struct os_memblock *)block_addr;
+ block_ptr = (struct os_memblock *)block_addr;
+ }
+
+ /* Last one in the list should be NULL */
+ SLIST_NEXT(block_ptr, mb_next) = NULL;
+ }
STAILQ_INSERT_TAIL(&g_os_mempool_list, mp, mp_list);
@@ -119,17 +180,24 @@
}
os_error_t
+os_mempool_init(struct os_mempool *mp, uint16_t blocks, uint32_t block_size,
+ void *membuf, char *name)
+{
+ return os_mempool_init_internal(mp, blocks, block_size, membuf, name, 0);
+}
+
+os_error_t
os_mempool_ext_init(struct os_mempool_ext *mpe, uint16_t blocks,
uint32_t block_size, void *membuf, char *name)
{
int rc;
- rc = os_mempool_init(&mpe->mpe_mp, blocks, block_size, membuf, name);
+ rc = os_mempool_init_internal(&mpe->mpe_mp, blocks, block_size, membuf,
+ name, OS_MEMPOOL_F_EXT);
if (rc != 0) {
return rc;
}
- mpe->mpe_mp.mp_flags = OS_MEMPOOL_F_EXT;
mpe->mpe_put_cb = NULL;
mpe->mpe_put_arg = NULL;
@@ -137,6 +205,44 @@
}
os_error_t
+os_mempool_unregister(struct os_mempool *mp)
+{
+ struct os_mempool *prev;
+ struct os_mempool *next;
+ struct os_mempool *cur;
+
+ /* Remove the mempool from the global stailq. This is done manually rather
+ * than with `STAILQ_REMOVE` to allow for a graceful failure if the mempool
+ * isn't found.
+ */
+
+ prev = NULL;
+ STAILQ_FOREACH(cur, &g_os_mempool_list, mp_list) {
+ if (cur == mp) {
+ break;
+ }
+ prev = cur;
+ }
+
+ if (cur == NULL) {
+ return OS_INVALID_PARM;
+ }
+
+ if (prev == NULL) {
+ STAILQ_REMOVE_HEAD(&g_os_mempool_list, mp_list);
+ } else {
+ next = STAILQ_NEXT(cur, mp_list);
+ if (next == NULL) {
+ g_os_mempool_list.stqh_last = &STAILQ_NEXT(prev, mp_list);
+ }
+
+ STAILQ_NEXT(prev, mp_list) = next;
+ }
+
+ return 0;
+}
+
+os_error_t
os_mempool_clear(struct os_mempool *mp)
{
struct os_memblock *block_ptr;
@@ -148,22 +254,24 @@
return OS_INVALID_PARM;
}
- true_block_size = OS_MEM_TRUE_BLOCK_SIZE(mp->mp_block_size);
+ true_block_size = OS_MEMPOOL_TRUE_BLOCK_SIZE(mp);
/* cleanup the memory pool structure */
mp->mp_num_free = mp->mp_num_blocks;
mp->mp_min_free = mp->mp_num_blocks;
- os_mempool_poison((void *)mp->mp_membuf_addr, true_block_size);
- SLIST_FIRST(mp) = (void *)mp->mp_membuf_addr;
+ os_mempool_poison(mp, (void *)mp->mp_membuf_addr);
+ os_mempool_guard(mp, (void *)mp->mp_membuf_addr);
+ SLIST_FIRST(mp) = (void *)(uintptr_t)mp->mp_membuf_addr;
/* Chain the memory blocks to the free list */
- block_addr = (uint8_t *)mp->mp_membuf_addr;
+ block_addr = (uint8_t *)(uintptr_t)mp->mp_membuf_addr;
block_ptr = (struct os_memblock *)block_addr;
blocks = mp->mp_num_blocks;
while (blocks > 1) {
block_addr += true_block_size;
- os_mempool_poison(block_addr, true_block_size);
+ os_mempool_poison(mp, block_addr);
+ os_mempool_guard(mp, block_addr);
SLIST_NEXT(block_ptr, mb_next) = (struct os_memblock *)block_addr;
block_ptr = (struct os_memblock *)block_addr;
--blocks;
@@ -185,7 +293,8 @@
if (!os_memblock_from(mp, block)) {
return false;
}
- os_mempool_poison_check(block, OS_MEMPOOL_TRUE_BLOCK_SIZE(mp));
+ os_mempool_poison_check(mp, block);
+ os_mempool_guard_check(mp, block);
}
return true;
@@ -194,24 +303,24 @@
int
os_memblock_from(const struct os_mempool *mp, const void *block_addr)
{
- uintptr_t true_block_size;
- uintptr_t baddr_ptr;
- uintptr_t end;
+ uint32_t true_block_size;
+ uintptr_t baddr32;
+ uint32_t end;
- _Static_assert(sizeof block_addr == sizeof baddr_ptr,
- "Pointer to void must be native word size.");
+ static_assert(sizeof block_addr == sizeof baddr32,
+ "Pointer to void must be 32-bits.");
- baddr_ptr = (uintptr_t)block_addr;
+ baddr32 = (uint32_t)(uintptr_t)block_addr;
true_block_size = OS_MEMPOOL_TRUE_BLOCK_SIZE(mp);
end = mp->mp_membuf_addr + (mp->mp_num_blocks * true_block_size);
/* Check that the block is in the memory buffer range. */
- if ((baddr_ptr < mp->mp_membuf_addr) || (baddr_ptr >= end)) {
+ if ((baddr32 < mp->mp_membuf_addr) || (baddr32 >= end)) {
return 0;
}
/* All freed blocks should be on true block size boundaries! */
- if (((baddr_ptr - mp->mp_membuf_addr) % true_block_size) != 0) {
+ if (((baddr32 - mp->mp_membuf_addr) % true_block_size) != 0) {
return 0;
}
@@ -224,6 +333,8 @@
os_sr_t sr;
struct os_memblock *block;
+ os_trace_api_u32(OS_TRACE_ID_MEMBLOCK_GET, (uint32_t)mp);
+
/* Check to make sure they passed in a memory pool (or something) */
block = NULL;
if (mp) {
@@ -245,10 +356,13 @@
OS_EXIT_CRITICAL(sr);
if (block) {
- os_mempool_poison_check(block, OS_MEMPOOL_TRUE_BLOCK_SIZE(mp));
+ os_mempool_poison_check(mp, block);
+ os_mempool_guard_check(mp, block);
}
}
+ os_trace_api_ret_u32(OS_TRACE_ID_MEMBLOCK_GET, (uint32_t)block);
+
return (void *)block;
}
@@ -258,7 +372,11 @@
os_sr_t sr;
struct os_memblock *block;
- os_mempool_poison(block_addr, OS_MEMPOOL_TRUE_BLOCK_SIZE(mp));
+ os_trace_api_u32x2(OS_TRACE_ID_MEMBLOCK_PUT_FROM_CB, (uint32_t)mp,
+ (uint32_t)block_addr);
+
+ os_mempool_guard_check(mp, block_addr);
+ os_mempool_poison(mp, block_addr);
block = (struct os_memblock *)block_addr;
OS_ENTER_CRITICAL(sr);
@@ -273,6 +391,8 @@
OS_EXIT_CRITICAL(sr);
+ os_trace_api_ret_u32(OS_TRACE_ID_MEMBLOCK_PUT_FROM_CB, (uint32_t)OS_OK);
+
return OS_OK;
}
@@ -280,14 +400,18 @@
os_memblock_put(struct os_mempool *mp, void *block_addr)
{
struct os_mempool_ext *mpe;
- int rc;
+ os_error_t ret;
#if MYNEWT_VAL(OS_MEMPOOL_CHECK)
struct os_memblock *block;
#endif
+ os_trace_api_u32x2(OS_TRACE_ID_MEMBLOCK_PUT, (uint32_t)mp,
+ (uint32_t)block_addr);
+
/* Make sure parameters are valid */
if ((mp == NULL) || (block_addr == NULL)) {
- return OS_INVALID_PARM;
+ ret = OS_INVALID_PARM;
+ goto done;
}
#if MYNEWT_VAL(OS_MEMPOOL_CHECK)
@@ -301,20 +425,23 @@
assert(block != (struct os_memblock *)block_addr);
}
#endif
-
/* If this is an extended mempool with a put callback, call the callback
* instead of freeing the block directly.
*/
if (mp->mp_flags & OS_MEMPOOL_F_EXT) {
mpe = (struct os_mempool_ext *)mp;
if (mpe->mpe_put_cb != NULL) {
- rc = mpe->mpe_put_cb(mpe, block_addr, mpe->mpe_put_arg);
- return rc;
+ ret = mpe->mpe_put_cb(mpe, block_addr, mpe->mpe_put_arg);
+ goto done;
}
}
/* No callback; free the block. */
- return os_memblock_put_from_cb(mp, block_addr);
+ ret = os_memblock_put_from_cb(mp, block_addr);
+
+done:
+ os_trace_api_ret_u32(OS_TRACE_ID_MEMBLOCK_PUT, (uint32_t)ret);
+ return ret;
}
struct os_mempool *
@@ -336,9 +463,14 @@
omi->omi_num_blocks = cur->mp_num_blocks;
omi->omi_num_free = cur->mp_num_free;
omi->omi_min_free = cur->mp_min_free;
- strncpy(omi->omi_name, cur->name, sizeof(omi->omi_name));
+ omi->omi_name[0] = '\0';
+ strncat(omi->omi_name, cur->name, sizeof(omi->omi_name) - 1);
return (cur);
}
-
+void
+os_mempool_module_init(void)
+{
+ STAILQ_INIT(&g_os_mempool_list);
+}
diff --git a/porting/nimble/src/os_msys_init.c b/porting/nimble/src/os_msys_init.c
index 4d42d18..d22ae35 100644
--- a/porting/nimble/src/os_msys_init.c
+++ b/porting/nimble/src/os_msys_init.c
@@ -20,27 +20,87 @@
#include <assert.h>
#include "os/os.h"
#include "mem/mem.h"
+#include "sysinit/sysinit.h"
+
+static STAILQ_HEAD(, os_mbuf_pool) g_msys_pool_list =
+ STAILQ_HEAD_INITIALIZER(g_msys_pool_list);
#if MYNEWT_VAL(MSYS_1_BLOCK_COUNT) > 0
#define SYSINIT_MSYS_1_MEMBLOCK_SIZE \
- OS_ALIGN(MYNEWT_VAL(MSYS_1_BLOCK_SIZE), OS_ALIGNMENT)
+ OS_ALIGN(MYNEWT_VAL(MSYS_1_BLOCK_SIZE), 4)
#define SYSINIT_MSYS_1_MEMPOOL_SIZE \
OS_MEMPOOL_SIZE(MYNEWT_VAL(MSYS_1_BLOCK_COUNT), \
SYSINIT_MSYS_1_MEMBLOCK_SIZE)
-static os_membuf_t os_msys_init_1_data[SYSINIT_MSYS_1_MEMPOOL_SIZE];
-static struct os_mbuf_pool os_msys_init_1_mbuf_pool;
-static struct os_mempool os_msys_init_1_mempool;
+static os_membuf_t os_msys_1_data[SYSINIT_MSYS_1_MEMPOOL_SIZE];
+static struct os_mbuf_pool os_msys_1_mbuf_pool;
+static struct os_mempool os_msys_1_mempool;
#endif
#if MYNEWT_VAL(MSYS_2_BLOCK_COUNT) > 0
#define SYSINIT_MSYS_2_MEMBLOCK_SIZE \
- OS_ALIGN(MYNEWT_VAL(MSYS_2_BLOCK_SIZE), OS_ALIGNMENT)
+ OS_ALIGN(MYNEWT_VAL(MSYS_2_BLOCK_SIZE), 4)
#define SYSINIT_MSYS_2_MEMPOOL_SIZE \
OS_MEMPOOL_SIZE(MYNEWT_VAL(MSYS_2_BLOCK_COUNT), \
SYSINIT_MSYS_2_MEMBLOCK_SIZE)
-static os_membuf_t os_msys_init_2_data[SYSINIT_MSYS_2_MEMPOOL_SIZE];
-static struct os_mbuf_pool os_msys_init_2_mbuf_pool;
-static struct os_mempool os_msys_init_2_mempool;
+static os_membuf_t os_msys_2_data[SYSINIT_MSYS_2_MEMPOOL_SIZE];
+static struct os_mbuf_pool os_msys_2_mbuf_pool;
+static struct os_mempool os_msys_2_mempool;
+#endif
+
+#define OS_MSYS_SANITY_ENABLED \
+ (MYNEWT_VAL(MSYS_1_SANITY_MIN_COUNT) > 0 || \
+ MYNEWT_VAL(MSYS_2_SANITY_MIN_COUNT) > 0)
+
+#if OS_MSYS_SANITY_ENABLED
+static struct os_sanity_check os_msys_sc;
+#endif
+
+#if OS_MSYS_SANITY_ENABLED
+
+/**
+ * Retrieves the minimum safe buffer count for an msys pool. That is, the
+ * lowest a pool's buffer count can be without causing the sanity check to
+ * fail.
+ *
+ * @param idx The index of the msys pool to query.
+ *
+ * @return The msys pool's minimum safe buffer count.
+ */
+static int
+os_msys_sanity_min_count(int idx)
+{
+ switch (idx) {
+ case 0:
+ return MYNEWT_VAL(MSYS_1_SANITY_MIN_COUNT);
+
+ case 1:
+ return MYNEWT_VAL(MSYS_2_SANITY_MIN_COUNT);
+
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+static int
+os_msys_sanity(struct os_sanity_check *sc, void *arg)
+{
+ const struct os_mbuf_pool *omp;
+ int min_count;
+ int idx;
+
+ idx = 0;
+ STAILQ_FOREACH(omp, &g_msys_pool_list, omp_next) {
+ min_count = os_msys_sanity_min_count(idx);
+ if (omp->omp_pool->mp_num_free < min_count) {
+ return OS_ENOMEM;
+ }
+
+ idx++;
+ }
+
+ return 0;
+}
#endif
static void
@@ -52,33 +112,45 @@
rc = mem_init_mbuf_pool(data, mempool, mbuf_pool, block_count, block_size,
name);
- assert(rc == 0);
+ SYSINIT_PANIC_ASSERT(rc == 0);
rc = os_msys_register(mbuf_pool);
- assert(rc == 0);
+ SYSINIT_PANIC_ASSERT(rc == 0);
}
void
os_msys_init(void)
{
+ int rc;
+
os_msys_reset();
(void)os_msys_init_once;
+ (void)rc;
+
#if MYNEWT_VAL(MSYS_1_BLOCK_COUNT) > 0
- os_msys_init_once(os_msys_init_1_data,
- &os_msys_init_1_mempool,
- &os_msys_init_1_mbuf_pool,
+ os_msys_init_once(os_msys_1_data,
+ &os_msys_1_mempool,
+ &os_msys_1_mbuf_pool,
MYNEWT_VAL(MSYS_1_BLOCK_COUNT),
SYSINIT_MSYS_1_MEMBLOCK_SIZE,
"msys_1");
#endif
#if MYNEWT_VAL(MSYS_2_BLOCK_COUNT) > 0
- os_msys_init_once(os_msys_init_2_data,
- &os_msys_init_2_mempool,
- &os_msys_init_2_mbuf_pool,
+ os_msys_init_once(os_msys_2_data,
+ &os_msys_2_mempool,
+ &os_msys_2_mbuf_pool,
MYNEWT_VAL(MSYS_2_BLOCK_COUNT),
SYSINIT_MSYS_2_MEMBLOCK_SIZE,
"msys_2");
#endif
-}
+
+#if OS_MSYS_SANITY_ENABLED
+ os_msys_sc.sc_func = os_msys_sanity;
+ os_msys_sc.sc_checkin_itvl =
+ OS_TICKS_PER_SEC * MYNEWT_VAL(MSYS_SANITY_TIMEOUT) / 1000;
+ rc = os_sanity_check_register(&os_msys_sc);
+ SYSINIT_PANIC_ASSERT(rc == 0);
+#endif
+}
\ No newline at end of file