sched_lock: remove the check for whether tcb is NULL
Remove the redundant NULL checks on rtcb: by the time sched_lock() and
sched_unlock() are called, the running task's TCB is always valid, so the
checks are dead code and the assertions can require a non-NULL rtcb.
Signed-off-by: hujun5 <hujun5@xiaomi.com>
diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c
index d8f75e7..97ae1e9 100644
--- a/sched/sched/sched_lock.c
+++ b/sched/sched/sched_lock.c
@@ -76,13 +76,13 @@
* integer type.
*/
- DEBUGASSERT(rtcb == NULL || rtcb->lockcount < MAX_LOCK_COUNT);
+ DEBUGASSERT(rtcb && rtcb->lockcount < MAX_LOCK_COUNT);
/* A counter is used to support locking. This allows nested lock
* operations on this thread (on any CPU)
*/
- if (rtcb != NULL && rtcb->lockcount++ == 0)
+ if (rtcb->lockcount++ == 0)
{
#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) || \
defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c
index ef76054..da75abe 100644
--- a/sched/sched/sched_unlock.c
+++ b/sched/sched/sched_unlock.c
@@ -64,13 +64,13 @@
-/* rtcb may be NULL only during early boot-up phases */
+/* rtcb is always valid here; NULL is only possible in early boot-up phases */
- DEBUGASSERT(rtcb == NULL || rtcb->lockcount > 0);
+ DEBUGASSERT(rtcb && rtcb->lockcount > 0);
/* Check if the lock counter has decremented to zero. If so,
* then pre-emption has been re-enabled.
*/
- if (rtcb != NULL && rtcb->lockcount == 1)
+ if (rtcb->lockcount == 1)
{
irqstate_t flags = enter_critical_section_wo_note();
FAR struct tcb_s *ptcb;