
mutex implementation for m3 without rtx lib

Hello,
Is there a mutex implementation available for use that does not require the rtx lib?
Thanks,
JG
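
(For reference: if blocking/scheduler integration is not required, the Cortex-M3 exclusive-access instructions LDREX/STREX are enough to build a simple lock with no RTX at all. The sketch below is only a rough illustration, assuming a CMSIS-Core header supplies the __LDREXW/__STREXW/__DMB/__WFE/__SEV intrinsics; the names bare_mutex_t, bare_mutex_lock and bare_mutex_unlock are invented for the example. It spin-waits (parked in WFE) instead of blocking a task, so it only suits short critical sections, and on a single core the lock holder must still be able to run, e.g. via a preemptive tick, or the waiter will never observe the release. The DMB barriers order the protected accesses against the lock word; SEV wakes anything parked in WFE.)

    #include <stdint.h>
    #include "cmsis_compiler.h"   /* assumption: CMSIS-Core (or your device header) provides the intrinsics used below */

    typedef volatile uint32_t bare_mutex_t ;   /* 0 = free, 1 = taken */

    static void bare_mutex_lock(bare_mutex_t *a_mutex)
    {
            uint32_t l_strex_failed ;

            do
            {
                    while (__LDREXW(a_mutex) != 0U)   /* already taken - wait for a release event */
                    {
                            __WFE() ;
                    }

                    l_strex_failed = __STREXW(1U, a_mutex) ;   /* try to claim it; 0 means the store succeeded */
            } while (l_strex_failed != 0U) ;

            __DMB() ;   /* acquire barrier: the claim is visible before the protected data is touched */
    }

    static void bare_mutex_unlock(bare_mutex_t *a_mutex)
    {
            __DMB() ;   /* release barrier: finish all accesses to the protected data first */
            *a_mutex = 0U ;
            __SEV() ;   /* wake anyone parked in __WFE() */
    }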

    // never use from within interrupt context. see comments for rtos_mutex_lock.
    int32s rtos_mutex_unlock(int32s a_id)
    {
            refuse_interrupt_context(__FILE__, __LINE__) ;
    
            if ( (a_id >=0) && (a_id < MAX_MUTEXES) )
            {
                    int32u   l_scheduler_state ;
                    t_mutex *lp_mutex ;
                    tcb_t   *lp_task ;
    
                    // the kernel is not reentrant; protect static/global data from ISRs
                    scheduler_disable(&l_scheduler_state) ;
    
                    lp_task = *(g_tcb + g_current_task_index) ;
                    lp_mutex = s_mutexes + a_id ;
    
                    if (lp_task->mutex_lock_reference_counters[a_id] > 0)
                    {
                            lp_task->mutex_lock_reference_counters[a_id]-- ; // always decrement the reference counter, no matter what task unlocks the mutex
                    }
    
                    // if the mutex is locked by the calling task and that task has relinquished all its
                    // references to the mutex (a task can lock a mutex multiple times if it is the owner)
                    // - allow other tasks to try to lock it
                    if ( (lp_mutex->state == e_mutex_locked) &&
                             (lp_mutex->owner == g_current_task_index) &&
                             (lp_task->mutex_lock_reference_counters[a_id] == 0) )
                    {
                            int32s l_unblocked_task ;
    
                            // select a task to be returned to the running queue, so that the scheduler can select it
    
                            // if there is a task blocked on the mutex, prepare it to be scheduled
    
                            lp_mutex->owner = ERR_MUTEX_NOT_FOUND ;
                            lp_mutex->state = e_mutex_unlocked ;
    
                            if (priority_queue_extract_minimum_data(&lp_mutex->blocked_tasks, &l_unblocked_task ) != ERR_QUEUE_EMPTY)
                            {
                                    // WAR STORY
                                    // this is the fairest approach: if a task has been waiting and the lock has become available,
                                    // select the next task from the blocked queue (note: this is not yet a priority queue) and put it in
                                    // the right slot in the ready list, for later selection. However, this introduces a problem of
                                    // possible starvation - the task needs the lock again but it might be locked by another task once it is
                                    // scheduled again. Different approaches, such as granting the CPU to the extracted task in
                                    // the next task switch, are doomed to fail, because they might introduce a priority inversion: if the
                                    // unblocked task was a low priority task that plans to keep the lock for a while, it might be
                                    // preempted by a high priority task which will have to wait.
                                    //tcb_t *x = *g_tcb + l_unblocked_task ;
                                    scheduler_declare_task_ready(l_unblocked_task, g_tcb[l_unblocked_task]->priority) ;
    
                                    scheduler_restore(e_scheduler_enabled) ;
                                    scheduler_reschedule() ;
                            }
                    }
    
                    scheduler_restore(l_scheduler_state) ;
            }
            else
            {
                    software_warning("%d %s %d\n", ERR_MUTEX_NOT_FOUND, __FILE__, __LINE__) ;
            }
    
            return 0 ;
    }
    
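A hypothetical usage sketch of the pair above (rtos_mutex_lock is not shown in this thread, so its name and signature are assumed here, as are MUTEX_UART and uart_send), just to illustrate the reference-counting behaviour described in the comments: the owner may nest lock calls, and the mutex is only handed to a blocked task once the owner's counter drops back to zero.

    void uart_task(void)
    {
            for (;;)
            {
                    rtos_mutex_lock(MUTEX_UART) ;     /* may block; calling task becomes the owner, counter = 1 */
                    rtos_mutex_lock(MUTEX_UART) ;     /* nested lock by the owner, counter = 2                  */

                    uart_send("hello\r\n") ;          /* the protected section                                  */

                    rtos_mutex_unlock(MUTEX_UART) ;   /* counter 2 -> 1, mutex is still held                    */
                    rtos_mutex_unlock(MUTEX_UART) ;   /* counter 1 -> 0, a blocked task (if any) is made ready  */
            }
    }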
